author    Waldemar Brodkorb <wbx@uclibc-ng.org>    2015-10-07 20:37:17 +0200
committer Waldemar Brodkorb <wbx@uclibc-ng.org>    2015-10-07 20:37:35 +0200
commit    1056ae1a7ef8a7019385296db7341a23110146b0 (patch)
tree      ec0ae52169164e04940160111585f362021d79c0 /target/linux/patches
parent    c6dbc9ca685a52fdd32308fbd64841a12fe7ca4f (diff)
update rt patch
Diffstat (limited to 'target/linux/patches')
-rw-r--r--    target/linux/patches/4.1.10/realtime.patch    46477
1 file changed, 45133 insertions(+), 1344 deletions(-)
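
For reference, the refreshed realtime.patch was most likely regenerated with a plain diff -Nur between a pristine 4.1.10 tree and a tree carrying the PREEMPT_RT changes, as the "diff -Nur linux-4.1.10.orig/... linux-4.1.10/..." headers below suggest. The commands and the -rt patch file name are assumptions for illustration only; they are not recorded in this commit:

    tar xf linux-4.1.10.tar.xz
    cp -a linux-4.1.10 linux-4.1.10.orig                  # keep a pristine copy to diff against
    cd linux-4.1.10
    patch -p1 < ../patch-4.1.10-rtN.patch                 # hypothetical upstream -rt patch name
    cd ..
    diff -Nur linux-4.1.10.orig linux-4.1.10 > target/linux/patches/4.1.10/realtime.patch
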
diff --git a/target/linux/patches/4.1.10/realtime.patch b/target/linux/patches/4.1.10/realtime.patch
index 9b5b92ee1..e9d7af67d 100644
--- a/target/linux/patches/4.1.10/realtime.patch
+++ b/target/linux/patches/4.1.10/realtime.patch
@@ -1,6 +1,6 @@
-diff -Nur linux-4.1.6.orig/arch/alpha/mm/fault.c linux-4.1.6/arch/alpha/mm/fault.c
---- linux-4.1.6.orig/arch/alpha/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/alpha/mm/fault.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/alpha/mm/fault.c linux-4.1.10/arch/alpha/mm/fault.c
+--- linux-4.1.10.orig/arch/alpha/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/alpha/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -23,8 +23,7 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -20,9 +20,9 @@ diff -Nur linux-4.1.6.orig/arch/alpha/mm/fault.c linux-4.1.6/arch/alpha/mm/fault
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
-diff -Nur linux-4.1.6.orig/arch/arc/include/asm/futex.h linux-4.1.6/arch/arc/include/asm/futex.h
---- linux-4.1.6.orig/arch/arc/include/asm/futex.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arc/include/asm/futex.h 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arc/include/asm/futex.h linux-4.1.10/arch/arc/include/asm/futex.h
+--- linux-4.1.10.orig/arch/arc/include/asm/futex.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arc/include/asm/futex.h 2015-10-07 18:00:07.000000000 +0200
@@ -53,7 +53,7 @@
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
@@ -68,9 +68,9 @@ diff -Nur linux-4.1.6.orig/arch/arc/include/asm/futex.h linux-4.1.6/arch/arc/inc
*uval = val;
return val;
-diff -Nur linux-4.1.6.orig/arch/arc/mm/fault.c linux-4.1.6/arch/arc/mm/fault.c
---- linux-4.1.6.orig/arch/arc/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arc/mm/fault.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arc/mm/fault.c linux-4.1.10/arch/arc/mm/fault.c
+--- linux-4.1.10.orig/arch/arc/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arc/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -86,7 +86,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -80,9 +80,9 @@ diff -Nur linux-4.1.6.orig/arch/arc/mm/fault.c linux-4.1.6/arch/arc/mm/fault.c
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.6/arch/arm/include/asm/cmpxchg.h
---- linux-4.1.6.orig/arch/arm/include/asm/cmpxchg.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/include/asm/cmpxchg.h 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.10/arch/arm/include/asm/cmpxchg.h
+--- linux-4.1.10.orig/arch/arm/include/asm/cmpxchg.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/include/asm/cmpxchg.h 2015-10-07 18:00:07.000000000 +0200
@@ -129,6 +129,8 @@
#else /* min ARCH >= ARMv6 */
@@ -92,9 +92,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/include/asm/cmpxchg.h linux-4.1.6/arch/arm/i
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
-diff -Nur linux-4.1.6.orig/arch/arm/include/asm/futex.h linux-4.1.6/arch/arm/include/asm/futex.h
---- linux-4.1.6.orig/arch/arm/include/asm/futex.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/include/asm/futex.h 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/include/asm/futex.h linux-4.1.10/arch/arm/include/asm/futex.h
+--- linux-4.1.10.orig/arch/arm/include/asm/futex.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/include/asm/futex.h 2015-10-07 18:00:07.000000000 +0200
@@ -93,6 +93,7 @@
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -136,9 +136,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/include/asm/futex.h linux-4.1.6/arch/arm/inc
if (!ret) {
switch (cmp) {
-diff -Nur linux-4.1.6.orig/arch/arm/include/asm/switch_to.h linux-4.1.6/arch/arm/include/asm/switch_to.h
---- linux-4.1.6.orig/arch/arm/include/asm/switch_to.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/include/asm/switch_to.h 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/include/asm/switch_to.h linux-4.1.10/arch/arm/include/asm/switch_to.h
+--- linux-4.1.10.orig/arch/arm/include/asm/switch_to.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/include/asm/switch_to.h 2015-10-07 18:00:07.000000000 +0200
@@ -3,6 +3,13 @@
#include <linux/thread_info.h>
@@ -161,9 +161,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/include/asm/switch_to.h linux-4.1.6/arch/arm
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
-diff -Nur linux-4.1.6.orig/arch/arm/include/asm/thread_info.h linux-4.1.6/arch/arm/include/asm/thread_info.h
---- linux-4.1.6.orig/arch/arm/include/asm/thread_info.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/include/asm/thread_info.h 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/include/asm/thread_info.h linux-4.1.10/arch/arm/include/asm/thread_info.h
+--- linux-4.1.10.orig/arch/arm/include/asm/thread_info.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/include/asm/thread_info.h 2015-10-07 18:00:07.000000000 +0200
@@ -50,6 +50,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
@@ -188,9 +188,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/include/asm/thread_info.h linux-4.1.6/arch/a
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-4.1.6.orig/arch/arm/Kconfig linux-4.1.6/arch/arm/Kconfig
---- linux-4.1.6.orig/arch/arm/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/Kconfig 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/Kconfig linux-4.1.10/arch/arm/Kconfig
+--- linux-4.1.10.orig/arch/arm/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -31,7 +31,7 @@
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
@@ -208,9 +208,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/Kconfig linux-4.1.6/arch/arm/Kconfig
select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
-diff -Nur linux-4.1.6.orig/arch/arm/kernel/asm-offsets.c linux-4.1.6/arch/arm/kernel/asm-offsets.c
---- linux-4.1.6.orig/arch/arm/kernel/asm-offsets.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kernel/asm-offsets.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kernel/asm-offsets.c linux-4.1.10/arch/arm/kernel/asm-offsets.c
+--- linux-4.1.10.orig/arch/arm/kernel/asm-offsets.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kernel/asm-offsets.c 2015-10-07 18:00:07.000000000 +0200
@@ -65,6 +65,7 @@
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -219,9 +219,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/kernel/asm-offsets.c linux-4.1.6/arch/arm/ke
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.6.orig/arch/arm/kernel/entry-armv.S linux-4.1.6/arch/arm/kernel/entry-armv.S
---- linux-4.1.6.orig/arch/arm/kernel/entry-armv.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kernel/entry-armv.S 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kernel/entry-armv.S linux-4.1.10/arch/arm/kernel/entry-armv.S
+--- linux-4.1.10.orig/arch/arm/kernel/entry-armv.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kernel/entry-armv.S 2015-10-07 18:00:07.000000000 +0200
@@ -208,11 +208,18 @@
#ifdef CONFIG_PREEMPT
get_thread_info tsk
@@ -252,9 +252,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/kernel/entry-armv.S linux-4.1.6/arch/arm/ker
reteq r8 @ go again
b 1b
#endif
-diff -Nur linux-4.1.6.orig/arch/arm/kernel/process.c linux-4.1.6/arch/arm/kernel/process.c
---- linux-4.1.6.orig/arch/arm/kernel/process.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kernel/process.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kernel/process.c linux-4.1.10/arch/arm/kernel/process.c
+--- linux-4.1.10.orig/arch/arm/kernel/process.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kernel/process.c 2015-10-07 18:00:07.000000000 +0200
@@ -290,6 +290,30 @@
}
@@ -286,9 +286,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/kernel/process.c linux-4.1.6/arch/arm/kernel
#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
-diff -Nur linux-4.1.6.orig/arch/arm/kernel/signal.c linux-4.1.6/arch/arm/kernel/signal.c
---- linux-4.1.6.orig/arch/arm/kernel/signal.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kernel/signal.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kernel/signal.c linux-4.1.10/arch/arm/kernel/signal.c
+--- linux-4.1.10.orig/arch/arm/kernel/signal.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kernel/signal.c 2015-10-07 18:00:07.000000000 +0200
@@ -563,7 +563,8 @@
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
@@ -299,9 +299,31 @@ diff -Nur linux-4.1.6.orig/arch/arm/kernel/signal.c linux-4.1.6/arch/arm/kernel/
schedule();
} else {
if (unlikely(!user_mode(regs)))
-diff -Nur linux-4.1.6.orig/arch/arm/kernel/unwind.c linux-4.1.6/arch/arm/kernel/unwind.c
---- linux-4.1.6.orig/arch/arm/kernel/unwind.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kernel/unwind.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kernel/smp.c linux-4.1.10/arch/arm/kernel/smp.c
+--- linux-4.1.10.orig/arch/arm/kernel/smp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kernel/smp.c 2015-10-07 18:00:07.000000000 +0200
+@@ -213,8 +213,6 @@
+ flush_cache_louis();
+ local_flush_tlb_all();
+
+- clear_tasks_mm_cpumask(cpu);
+-
+ return 0;
+ }
+
+@@ -230,6 +228,9 @@
+ pr_err("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
++
++ clear_tasks_mm_cpumask(cpu);
++
+ pr_notice("CPU%u: shutdown\n", cpu);
+
+ /*
+diff -Nur linux-4.1.10.orig/arch/arm/kernel/unwind.c linux-4.1.10/arch/arm/kernel/unwind.c
+--- linux-4.1.10.orig/arch/arm/kernel/unwind.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kernel/unwind.c 2015-10-07 18:00:07.000000000 +0200
@@ -93,7 +93,7 @@
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
@@ -353,9 +375,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/kernel/unwind.c linux-4.1.6/arch/arm/kernel/
kfree(tab);
}
-diff -Nur linux-4.1.6.orig/arch/arm/kvm/arm.c linux-4.1.6/arch/arm/kvm/arm.c
---- linux-4.1.6.orig/arch/arm/kvm/arm.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kvm/arm.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kvm/arm.c linux-4.1.10/arch/arm/kvm/arm.c
+--- linux-4.1.10.orig/arch/arm/kvm/arm.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kvm/arm.c 2015-10-07 18:00:07.000000000 +0200
@@ -474,9 +474,9 @@
static void vcpu_pause(struct kvm_vcpu *vcpu)
@@ -368,9 +390,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/kvm/arm.c linux-4.1.6/arch/arm/kvm/arm.c
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-diff -Nur linux-4.1.6.orig/arch/arm/kvm/psci.c linux-4.1.6/arch/arm/kvm/psci.c
---- linux-4.1.6.orig/arch/arm/kvm/psci.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/kvm/psci.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/kvm/psci.c linux-4.1.10/arch/arm/kvm/psci.c
+--- linux-4.1.10.orig/arch/arm/kvm/psci.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/kvm/psci.c 2015-10-07 18:00:07.000000000 +0200
@@ -68,7 +68,7 @@
{
struct kvm *kvm = source_vcpu->kvm;
@@ -389,9 +411,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/kvm/psci.c linux-4.1.6/arch/arm/kvm/psci.c
return PSCI_RET_SUCCESS;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.6/arch/arm/mach-exynos/platsmp.c
---- linux-4.1.6.orig/arch/arm/mach-exynos/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-exynos/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.10/arch/arm/mach-exynos/platsmp.c
+--- linux-4.1.10.orig/arch/arm/mach-exynos/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-exynos/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -231,7 +231,7 @@
return (void __iomem *)(S5P_VA_SCU);
}
@@ -439,9 +461,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-exynos/platsmp.c linux-4.1.6/arch/arm/m
return pen_release != -1 ? ret : 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.6/arch/arm/mach-hisi/platmcpm.c
---- linux-4.1.6.orig/arch/arm/mach-hisi/platmcpm.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-hisi/platmcpm.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.10/arch/arm/mach-hisi/platmcpm.c
+--- linux-4.1.10.orig/arch/arm/mach-hisi/platmcpm.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-hisi/platmcpm.c 2015-10-07 18:00:07.000000000 +0200
@@ -57,7 +57,7 @@
static void __iomem *sysctrl, *fabric;
@@ -544,9 +566,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-hisi/platmcpm.c linux-4.1.6/arch/arm/ma
}
static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
-diff -Nur linux-4.1.6.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.6/arch/arm/mach-omap2/omap-smp.c
---- linux-4.1.6.orig/arch/arm/mach-omap2/omap-smp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-omap2/omap-smp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.10/arch/arm/mach-omap2/omap-smp.c
+--- linux-4.1.10.orig/arch/arm/mach-omap2/omap-smp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-omap2/omap-smp.c 2015-10-07 18:00:07.000000000 +0200
@@ -43,7 +43,7 @@
/* SCU base address */
static void __iomem *scu_base;
@@ -585,9 +607,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-omap2/omap-smp.c linux-4.1.6/arch/arm/m
return 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.6/arch/arm/mach-prima2/platsmp.c
---- linux-4.1.6.orig/arch/arm/mach-prima2/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-prima2/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.10/arch/arm/mach-prima2/platsmp.c
+--- linux-4.1.10.orig/arch/arm/mach-prima2/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-prima2/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -22,7 +22,7 @@
static void __iomem *clk_base;
@@ -626,9 +648,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-prima2/platsmp.c linux-4.1.6/arch/arm/m
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.6/arch/arm/mach-qcom/platsmp.c
---- linux-4.1.6.orig/arch/arm/mach-qcom/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-qcom/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.10/arch/arm/mach-qcom/platsmp.c
+--- linux-4.1.10.orig/arch/arm/mach-qcom/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-qcom/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -46,7 +46,7 @@
extern void secondary_startup_arm(void);
@@ -667,9 +689,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-qcom/platsmp.c linux-4.1.6/arch/arm/mac
return ret;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-spear/platsmp.c linux-4.1.6/arch/arm/mach-spear/platsmp.c
---- linux-4.1.6.orig/arch/arm/mach-spear/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-spear/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-spear/platsmp.c linux-4.1.10/arch/arm/mach-spear/platsmp.c
+--- linux-4.1.10.orig/arch/arm/mach-spear/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-spear/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -32,7 +32,7 @@
sync_cache_w(&pen_release);
}
@@ -708,9 +730,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-spear/platsmp.c linux-4.1.6/arch/arm/ma
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-sti/platsmp.c linux-4.1.6/arch/arm/mach-sti/platsmp.c
---- linux-4.1.6.orig/arch/arm/mach-sti/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-sti/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-sti/platsmp.c linux-4.1.10/arch/arm/mach-sti/platsmp.c
+--- linux-4.1.10.orig/arch/arm/mach-sti/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-sti/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -34,7 +34,7 @@
sync_cache_w(&pen_release);
}
@@ -749,9 +771,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-sti/platsmp.c linux-4.1.6/arch/arm/mach
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.6/arch/arm/mach-ux500/platsmp.c
---- linux-4.1.6.orig/arch/arm/mach-ux500/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mach-ux500/platsmp.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.10/arch/arm/mach-ux500/platsmp.c
+--- linux-4.1.10.orig/arch/arm/mach-ux500/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mach-ux500/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -51,7 +51,7 @@
return NULL;
}
@@ -790,9 +812,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mach-ux500/platsmp.c linux-4.1.6/arch/arm/ma
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mm/fault.c linux-4.1.6/arch/arm/mm/fault.c
---- linux-4.1.6.orig/arch/arm/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mm/fault.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mm/fault.c linux-4.1.10/arch/arm/mm/fault.c
+--- linux-4.1.10.orig/arch/arm/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -276,7 +276,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -822,9 +844,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mm/fault.c linux-4.1.6/arch/arm/mm/fault.c
do_bad_area(addr, fsr, regs);
return 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm/mm/highmem.c linux-4.1.6/arch/arm/mm/highmem.c
---- linux-4.1.6.orig/arch/arm/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/mm/highmem.c 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/mm/highmem.c linux-4.1.10/arch/arm/mm/highmem.c
+--- linux-4.1.10.orig/arch/arm/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -54,11 +54,13 @@
void *kmap_atomic(struct page *page)
@@ -927,9 +949,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/mm/highmem.c linux-4.1.6/arch/arm/mm/highmem
+ }
+}
+#endif
-diff -Nur linux-4.1.6.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.6/arch/arm/plat-versatile/platsmp.c
---- linux-4.1.6.orig/arch/arm/plat-versatile/platsmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm/plat-versatile/platsmp.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.10/arch/arm/plat-versatile/platsmp.c
+--- linux-4.1.10.orig/arch/arm/plat-versatile/platsmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm/plat-versatile/platsmp.c 2015-10-07 18:00:07.000000000 +0200
@@ -30,7 +30,7 @@
sync_cache_w(&pen_release);
}
@@ -968,9 +990,9 @@ diff -Nur linux-4.1.6.orig/arch/arm/plat-versatile/platsmp.c linux-4.1.6/arch/ar
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-4.1.6.orig/arch/arm64/include/asm/futex.h linux-4.1.6/arch/arm64/include/asm/futex.h
---- linux-4.1.6.orig/arch/arm64/include/asm/futex.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/include/asm/futex.h 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/include/asm/futex.h linux-4.1.10/arch/arm64/include/asm/futex.h
+--- linux-4.1.10.orig/arch/arm64/include/asm/futex.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/include/asm/futex.h 2015-10-07 18:00:07.000000000 +0200
@@ -58,7 +58,7 @@
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -989,9 +1011,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/include/asm/futex.h linux-4.1.6/arch/arm64
if (!ret) {
switch (cmp) {
-diff -Nur linux-4.1.6.orig/arch/arm64/include/asm/thread_info.h linux-4.1.6/arch/arm64/include/asm/thread_info.h
---- linux-4.1.6.orig/arch/arm64/include/asm/thread_info.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/include/asm/thread_info.h 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/include/asm/thread_info.h linux-4.1.10/arch/arm64/include/asm/thread_info.h
+--- linux-4.1.10.orig/arch/arm64/include/asm/thread_info.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/include/asm/thread_info.h 2015-10-07 18:00:07.000000000 +0200
@@ -47,6 +47,7 @@
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
@@ -1016,9 +1038,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/include/asm/thread_info.h linux-4.1.6/arch
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-4.1.6.orig/arch/arm64/Kconfig linux-4.1.6/arch/arm64/Kconfig
---- linux-4.1.6.orig/arch/arm64/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/Kconfig 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/Kconfig linux-4.1.10/arch/arm64/Kconfig
+--- linux-4.1.10.orig/arch/arm64/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -69,8 +69,10 @@
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -1030,9 +1052,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/Kconfig linux-4.1.6/arch/arm64/Kconfig
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
-diff -Nur linux-4.1.6.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.6/arch/arm64/kernel/asm-offsets.c
---- linux-4.1.6.orig/arch/arm64/kernel/asm-offsets.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/kernel/asm-offsets.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.10/arch/arm64/kernel/asm-offsets.c
+--- linux-4.1.10.orig/arch/arm64/kernel/asm-offsets.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/kernel/asm-offsets.c 2015-10-07 18:00:07.000000000 +0200
@@ -35,6 +35,7 @@
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -1041,9 +1063,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/kernel/asm-offsets.c linux-4.1.6/arch/arm6
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.6.orig/arch/arm64/kernel/entry.S linux-4.1.6/arch/arm64/kernel/entry.S
---- linux-4.1.6.orig/arch/arm64/kernel/entry.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/kernel/entry.S 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/kernel/entry.S linux-4.1.10/arch/arm64/kernel/entry.S
+--- linux-4.1.10.orig/arch/arm64/kernel/entry.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/kernel/entry.S 2015-10-07 18:00:07.000000000 +0200
@@ -367,11 +367,16 @@
#ifdef CONFIG_PREEMPT
get_thread_info tsk
@@ -1080,9 +1102,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/kernel/entry.S linux-4.1.6/arch/arm64/kern
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
-diff -Nur linux-4.1.6.orig/arch/arm64/kernel/perf_event.c linux-4.1.6/arch/arm64/kernel/perf_event.c
---- linux-4.1.6.orig/arch/arm64/kernel/perf_event.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/kernel/perf_event.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/kernel/perf_event.c linux-4.1.10/arch/arm64/kernel/perf_event.c
+--- linux-4.1.10.orig/arch/arm64/kernel/perf_event.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/kernel/perf_event.c 2015-10-07 18:00:07.000000000 +0200
@@ -488,7 +488,7 @@
}
@@ -1092,9 +1114,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/kernel/perf_event.c linux-4.1.6/arch/arm64
"arm-pmu", armpmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
-diff -Nur linux-4.1.6.orig/arch/arm64/mm/fault.c linux-4.1.6/arch/arm64/mm/fault.c
---- linux-4.1.6.orig/arch/arm64/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/arm64/mm/fault.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/arm64/mm/fault.c linux-4.1.10/arch/arm64/mm/fault.c
+--- linux-4.1.10.orig/arch/arm64/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/arm64/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -211,7 +211,7 @@
* If we're in an interrupt or have no user context, we must not take
* the fault.
@@ -1104,9 +1126,9 @@ diff -Nur linux-4.1.6.orig/arch/arm64/mm/fault.c linux-4.1.6/arch/arm64/mm/fault
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/avr32/include/asm/uaccess.h linux-4.1.6/arch/avr32/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/avr32/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/avr32/include/asm/uaccess.h 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/avr32/include/asm/uaccess.h linux-4.1.10/arch/avr32/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/avr32/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/avr32/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -97,7 +97,8 @@
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
@@ -1147,9 +1169,9 @@ diff -Nur linux-4.1.6.orig/arch/avr32/include/asm/uaccess.h linux-4.1.6/arch/avr
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.6.orig/arch/avr32/mm/fault.c linux-4.1.6/arch/avr32/mm/fault.c
---- linux-4.1.6.orig/arch/avr32/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/avr32/mm/fault.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/avr32/mm/fault.c linux-4.1.10/arch/avr32/mm/fault.c
+--- linux-4.1.10.orig/arch/avr32/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/avr32/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -14,11 +14,11 @@
#include <linux/pagemap.h>
#include <linux/kdebug.h>
@@ -1172,9 +1194,9 @@ diff -Nur linux-4.1.6.orig/arch/avr32/mm/fault.c linux-4.1.6/arch/avr32/mm/fault
goto no_context;
local_irq_enable();
-diff -Nur linux-4.1.6.orig/arch/cris/mm/fault.c linux-4.1.6/arch/cris/mm/fault.c
---- linux-4.1.6.orig/arch/cris/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/cris/mm/fault.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/cris/mm/fault.c linux-4.1.10/arch/cris/mm/fault.c
+--- linux-4.1.10.orig/arch/cris/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/cris/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -1198,9 +1220,9 @@ diff -Nur linux-4.1.6.orig/arch/cris/mm/fault.c linux-4.1.6/arch/cris/mm/fault.c
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/frv/mm/fault.c linux-4.1.6/arch/frv/mm/fault.c
---- linux-4.1.6.orig/arch/frv/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/frv/mm/fault.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/frv/mm/fault.c linux-4.1.10/arch/frv/mm/fault.c
+--- linux-4.1.10.orig/arch/frv/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/frv/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -19,9 +19,9 @@
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -1221,9 +1243,9 @@ diff -Nur linux-4.1.6.orig/arch/frv/mm/fault.c linux-4.1.6/arch/frv/mm/fault.c
goto no_context;
if (user_mode(__frame))
-diff -Nur linux-4.1.6.orig/arch/frv/mm/highmem.c linux-4.1.6/arch/frv/mm/highmem.c
---- linux-4.1.6.orig/arch/frv/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/frv/mm/highmem.c 2015-09-08 23:49:03.502377871 +0200
+diff -Nur linux-4.1.10.orig/arch/frv/mm/highmem.c linux-4.1.10/arch/frv/mm/highmem.c
+--- linux-4.1.10.orig/arch/frv/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/frv/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -42,6 +42,7 @@
unsigned long paddr;
int type;
@@ -1239,9 +1261,9 @@ diff -Nur linux-4.1.6.orig/arch/frv/mm/highmem.c linux-4.1.6/arch/frv/mm/highmem
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/hexagon/include/asm/uaccess.h linux-4.1.6/arch/hexagon/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/hexagon/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/hexagon/include/asm/uaccess.h 2015-09-08 23:49:04.386279854 +0200
+diff -Nur linux-4.1.10.orig/arch/hexagon/include/asm/uaccess.h linux-4.1.10/arch/hexagon/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/hexagon/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/hexagon/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -36,7 +36,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -1252,9 +1274,9 @@ diff -Nur linux-4.1.6.orig/arch/hexagon/include/asm/uaccess.h linux-4.1.6/arch/h
*
* Checks if a pointer to a block of memory in user space is valid.
*
-diff -Nur linux-4.1.6.orig/arch/ia64/mm/fault.c linux-4.1.6/arch/ia64/mm/fault.c
---- linux-4.1.6.orig/arch/ia64/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/ia64/mm/fault.c 2015-09-08 23:49:04.658249697 +0200
+diff -Nur linux-4.1.10.orig/arch/ia64/mm/fault.c linux-4.1.10/arch/ia64/mm/fault.c
+--- linux-4.1.10.orig/arch/ia64/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/ia64/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -11,10 +11,10 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -1276,9 +1298,9 @@ diff -Nur linux-4.1.6.orig/arch/ia64/mm/fault.c linux-4.1.6/arch/ia64/mm/fault.c
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-diff -Nur linux-4.1.6.orig/arch/Kconfig linux-4.1.6/arch/Kconfig
---- linux-4.1.6.orig/arch/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/Kconfig 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/arch/Kconfig linux-4.1.10/arch/Kconfig
+--- linux-4.1.10.orig/arch/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -6,6 +6,7 @@
tristate "OProfile system profiling"
depends on PROFILING
@@ -1287,9 +1309,9 @@ diff -Nur linux-4.1.6.orig/arch/Kconfig linux-4.1.6/arch/Kconfig
select RING_BUFFER
select RING_BUFFER_ALLOW_SWAP
help
-diff -Nur linux-4.1.6.orig/arch/m32r/include/asm/uaccess.h linux-4.1.6/arch/m32r/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/m32r/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/m32r/include/asm/uaccess.h 2015-09-08 23:49:04.658249697 +0200
+diff -Nur linux-4.1.10.orig/arch/m32r/include/asm/uaccess.h linux-4.1.10/arch/m32r/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/m32r/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/m32r/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -91,7 +91,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -1390,9 +1412,9 @@ diff -Nur linux-4.1.6.orig/arch/m32r/include/asm/uaccess.h linux-4.1.6/arch/m32r
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.6.orig/arch/m32r/mm/fault.c linux-4.1.6/arch/m32r/mm/fault.c
---- linux-4.1.6.orig/arch/m32r/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/m32r/mm/fault.c 2015-09-08 23:49:04.658249697 +0200
+diff -Nur linux-4.1.10.orig/arch/m32r/mm/fault.c linux-4.1.10/arch/m32r/mm/fault.c
+--- linux-4.1.10.orig/arch/m32r/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/m32r/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -24,9 +24,9 @@
#include <linux/vt_kern.h> /* For unblank_screen() */
#include <linux/highmem.h>
@@ -1418,9 +1440,9 @@ diff -Nur linux-4.1.6.orig/arch/m32r/mm/fault.c linux-4.1.6/arch/m32r/mm/fault.c
goto bad_area_nosemaphore;
if (error_code & ACE_USERMODE)
-diff -Nur linux-4.1.6.orig/arch/m68k/mm/fault.c linux-4.1.6/arch/m68k/mm/fault.c
---- linux-4.1.6.orig/arch/m68k/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/m68k/mm/fault.c 2015-09-08 23:49:04.658249697 +0200
+diff -Nur linux-4.1.10.orig/arch/m68k/mm/fault.c linux-4.1.10/arch/m68k/mm/fault.c
+--- linux-4.1.10.orig/arch/m68k/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/m68k/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -10,10 +10,10 @@
#include <linux/ptrace.h>
#include <linux/interrupt.h>
@@ -1442,9 +1464,9 @@ diff -Nur linux-4.1.6.orig/arch/m68k/mm/fault.c linux-4.1.6/arch/m68k/mm/fault.c
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/metag/mm/fault.c linux-4.1.6/arch/metag/mm/fault.c
---- linux-4.1.6.orig/arch/metag/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/metag/mm/fault.c 2015-09-08 23:49:04.658249697 +0200
+diff -Nur linux-4.1.10.orig/arch/metag/mm/fault.c linux-4.1.10/arch/metag/mm/fault.c
+--- linux-4.1.10.orig/arch/metag/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/metag/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -105,7 +105,7 @@
mm = tsk->mm;
@@ -1454,9 +1476,9 @@ diff -Nur linux-4.1.6.orig/arch/metag/mm/fault.c linux-4.1.6/arch/metag/mm/fault
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/metag/mm/highmem.c linux-4.1.6/arch/metag/mm/highmem.c
---- linux-4.1.6.orig/arch/metag/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/metag/mm/highmem.c 2015-09-08 23:49:04.662249253 +0200
+diff -Nur linux-4.1.10.orig/arch/metag/mm/highmem.c linux-4.1.10/arch/metag/mm/highmem.c
+--- linux-4.1.10.orig/arch/metag/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/metag/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -43,7 +43,7 @@
unsigned long vaddr;
int type;
@@ -1482,9 +1504,9 @@ diff -Nur linux-4.1.6.orig/arch/metag/mm/highmem.c linux-4.1.6/arch/metag/mm/hig
pagefault_disable();
type = kmap_atomic_idx_push();
-diff -Nur linux-4.1.6.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.6/arch/microblaze/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/microblaze/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/microblaze/include/asm/uaccess.h 2015-09-08 23:49:04.662249253 +0200
+diff -Nur linux-4.1.10.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.10/arch/microblaze/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/microblaze/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/microblaze/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -178,7 +178,8 @@
* @x: Variable to store result.
* @ptr: Source address, in user space.
@@ -1505,9 +1527,9 @@ diff -Nur linux-4.1.6.orig/arch/microblaze/include/asm/uaccess.h linux-4.1.6/arc
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.6.orig/arch/microblaze/mm/fault.c linux-4.1.6/arch/microblaze/mm/fault.c
---- linux-4.1.6.orig/arch/microblaze/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/microblaze/mm/fault.c 2015-09-08 23:49:04.670248364 +0200
+diff -Nur linux-4.1.10.orig/arch/microblaze/mm/fault.c linux-4.1.10/arch/microblaze/mm/fault.c
+--- linux-4.1.10.orig/arch/microblaze/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/microblaze/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -107,14 +107,14 @@
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
@@ -1527,9 +1549,9 @@ diff -Nur linux-4.1.6.orig/arch/microblaze/mm/fault.c linux-4.1.6/arch/microblaz
pr_emerg("r15 = %lx MSR = %lx\n",
regs->r15, regs->msr);
die("Weird page fault", regs, SIGSEGV);
-diff -Nur linux-4.1.6.orig/arch/microblaze/mm/highmem.c linux-4.1.6/arch/microblaze/mm/highmem.c
---- linux-4.1.6.orig/arch/microblaze/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/microblaze/mm/highmem.c 2015-09-08 23:49:04.670248364 +0200
+diff -Nur linux-4.1.10.orig/arch/microblaze/mm/highmem.c linux-4.1.10/arch/microblaze/mm/highmem.c
+--- linux-4.1.10.orig/arch/microblaze/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/microblaze/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -37,7 +37,7 @@
unsigned long vaddr;
int idx, type;
@@ -1554,9 +1576,9 @@ diff -Nur linux-4.1.6.orig/arch/microblaze/mm/highmem.c linux-4.1.6/arch/microbl
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/mips/include/asm/uaccess.h linux-4.1.6/arch/mips/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/mips/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mips/include/asm/uaccess.h 2015-09-08 23:49:04.670248364 +0200
+diff -Nur linux-4.1.10.orig/arch/mips/include/asm/uaccess.h linux-4.1.10/arch/mips/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/mips/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mips/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -103,7 +103,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -1707,9 +1729,9 @@ diff -Nur linux-4.1.6.orig/arch/mips/include/asm/uaccess.h linux-4.1.6/arch/mips
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.6.orig/arch/mips/Kconfig linux-4.1.6/arch/mips/Kconfig
---- linux-4.1.6.orig/arch/mips/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mips/Kconfig 2015-09-08 23:49:04.670248364 +0200
+diff -Nur linux-4.1.10.orig/arch/mips/Kconfig linux-4.1.10/arch/mips/Kconfig
+--- linux-4.1.10.orig/arch/mips/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mips/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -2366,7 +2366,7 @@
#
config HIGHMEM
@@ -1719,9 +1741,9 @@ diff -Nur linux-4.1.6.orig/arch/mips/Kconfig linux-4.1.6/arch/mips/Kconfig
config CPU_SUPPORTS_HIGHMEM
bool
-diff -Nur linux-4.1.6.orig/arch/mips/kernel/signal-common.h linux-4.1.6/arch/mips/kernel/signal-common.h
---- linux-4.1.6.orig/arch/mips/kernel/signal-common.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mips/kernel/signal-common.h 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/mips/kernel/signal-common.h linux-4.1.10/arch/mips/kernel/signal-common.h
+--- linux-4.1.10.orig/arch/mips/kernel/signal-common.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mips/kernel/signal-common.h 2015-10-07 18:00:07.000000000 +0200
@@ -28,12 +28,7 @@
extern int fpcsr_pending(unsigned int __user *fpcsr);
@@ -1737,9 +1759,9 @@ diff -Nur linux-4.1.6.orig/arch/mips/kernel/signal-common.h linux-4.1.6/arch/mip
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
#endif /* __SIGNAL_COMMON_H */
-diff -Nur linux-4.1.6.orig/arch/mips/mm/fault.c linux-4.1.6/arch/mips/mm/fault.c
---- linux-4.1.6.orig/arch/mips/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mips/mm/fault.c 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/mips/mm/fault.c linux-4.1.10/arch/mips/mm/fault.c
+--- linux-4.1.10.orig/arch/mips/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mips/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -21,10 +21,10 @@
#include <linux/module.h>
#include <linux/kprobes.h>
@@ -1761,9 +1783,9 @@ diff -Nur linux-4.1.6.orig/arch/mips/mm/fault.c linux-4.1.6/arch/mips/mm/fault.c
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/mips/mm/highmem.c linux-4.1.6/arch/mips/mm/highmem.c
---- linux-4.1.6.orig/arch/mips/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mips/mm/highmem.c 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/mips/mm/highmem.c linux-4.1.10/arch/mips/mm/highmem.c
+--- linux-4.1.10.orig/arch/mips/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mips/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -47,7 +47,7 @@
unsigned long vaddr;
int idx, type;
@@ -1797,9 +1819,9 @@ diff -Nur linux-4.1.6.orig/arch/mips/mm/highmem.c linux-4.1.6/arch/mips/mm/highm
pagefault_disable();
type = kmap_atomic_idx_push();
-diff -Nur linux-4.1.6.orig/arch/mips/mm/init.c linux-4.1.6/arch/mips/mm/init.c
---- linux-4.1.6.orig/arch/mips/mm/init.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mips/mm/init.c 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/mips/mm/init.c linux-4.1.10/arch/mips/mm/init.c
+--- linux-4.1.10.orig/arch/mips/mm/init.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mips/mm/init.c 2015-10-07 18:00:07.000000000 +0200
@@ -90,6 +90,7 @@
BUG_ON(Page_dcache_dirty(page));
@@ -1816,9 +1838,9 @@ diff -Nur linux-4.1.6.orig/arch/mips/mm/init.c linux-4.1.6/arch/mips/mm/init.c
}
void copy_user_highpage(struct page *to, struct page *from,
-diff -Nur linux-4.1.6.orig/arch/mn10300/include/asm/highmem.h linux-4.1.6/arch/mn10300/include/asm/highmem.h
---- linux-4.1.6.orig/arch/mn10300/include/asm/highmem.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mn10300/include/asm/highmem.h 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/mn10300/include/asm/highmem.h linux-4.1.10/arch/mn10300/include/asm/highmem.h
+--- linux-4.1.10.orig/arch/mn10300/include/asm/highmem.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mn10300/include/asm/highmem.h 2015-10-07 18:00:07.000000000 +0200
@@ -75,6 +75,7 @@
unsigned long vaddr;
int idx, type;
@@ -1843,9 +1865,9 @@ diff -Nur linux-4.1.6.orig/arch/mn10300/include/asm/highmem.h linux-4.1.6/arch/m
}
#endif /* __KERNEL__ */
-diff -Nur linux-4.1.6.orig/arch/mn10300/mm/fault.c linux-4.1.6/arch/mn10300/mm/fault.c
---- linux-4.1.6.orig/arch/mn10300/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/mn10300/mm/fault.c 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/mn10300/mm/fault.c linux-4.1.10/arch/mn10300/mm/fault.c
+--- linux-4.1.10.orig/arch/mn10300/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/mn10300/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -23,8 +23,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -1865,9 +1887,9 @@ diff -Nur linux-4.1.6.orig/arch/mn10300/mm/fault.c linux-4.1.6/arch/mn10300/mm/f
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-diff -Nur linux-4.1.6.orig/arch/nios2/mm/fault.c linux-4.1.6/arch/nios2/mm/fault.c
---- linux-4.1.6.orig/arch/nios2/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/nios2/mm/fault.c 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/nios2/mm/fault.c linux-4.1.10/arch/nios2/mm/fault.c
+--- linux-4.1.10.orig/arch/nios2/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/nios2/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -77,7 +77,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1877,9 +1899,9 @@ diff -Nur linux-4.1.6.orig/arch/nios2/mm/fault.c linux-4.1.6/arch/nios2/mm/fault
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.6/arch/parisc/include/asm/cacheflush.h
---- linux-4.1.6.orig/arch/parisc/include/asm/cacheflush.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/parisc/include/asm/cacheflush.h 2015-09-08 23:49:04.674247921 +0200
+diff -Nur linux-4.1.10.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.10/arch/parisc/include/asm/cacheflush.h
+--- linux-4.1.10.orig/arch/parisc/include/asm/cacheflush.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/parisc/include/asm/cacheflush.h 2015-10-07 18:00:07.000000000 +0200
@@ -142,6 +142,7 @@
static inline void *kmap_atomic(struct page *page)
@@ -1896,9 +1918,9 @@ diff -Nur linux-4.1.6.orig/arch/parisc/include/asm/cacheflush.h linux-4.1.6/arch
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-diff -Nur linux-4.1.6.orig/arch/parisc/kernel/traps.c linux-4.1.6/arch/parisc/kernel/traps.c
---- linux-4.1.6.orig/arch/parisc/kernel/traps.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/parisc/kernel/traps.c 2015-09-08 23:49:04.678247479 +0200
+diff -Nur linux-4.1.10.orig/arch/parisc/kernel/traps.c linux-4.1.10/arch/parisc/kernel/traps.c
+--- linux-4.1.10.orig/arch/parisc/kernel/traps.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/parisc/kernel/traps.c 2015-10-07 18:00:07.000000000 +0200
@@ -26,9 +26,9 @@
#include <linux/console.h>
#include <linux/bug.h>
@@ -1919,9 +1941,9 @@ diff -Nur linux-4.1.6.orig/arch/parisc/kernel/traps.c linux-4.1.6/arch/parisc/ke
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
-diff -Nur linux-4.1.6.orig/arch/parisc/mm/fault.c linux-4.1.6/arch/parisc/mm/fault.c
---- linux-4.1.6.orig/arch/parisc/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/parisc/mm/fault.c 2015-09-08 23:49:04.678247479 +0200
+diff -Nur linux-4.1.10.orig/arch/parisc/mm/fault.c linux-4.1.10/arch/parisc/mm/fault.c
+--- linux-4.1.10.orig/arch/parisc/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/parisc/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -15,8 +15,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
@@ -1941,9 +1963,9 @@ diff -Nur linux-4.1.6.orig/arch/parisc/mm/fault.c linux-4.1.6/arch/parisc/mm/fau
goto no_context;
tsk = current;
-diff -Nur linux-4.1.6.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.6/arch/powerpc/include/asm/kvm_host.h
---- linux-4.1.6.orig/arch/powerpc/include/asm/kvm_host.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/include/asm/kvm_host.h 2015-09-08 23:49:04.810232841 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.10/arch/powerpc/include/asm/kvm_host.h
+--- linux-4.1.10.orig/arch/powerpc/include/asm/kvm_host.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/include/asm/kvm_host.h 2015-10-07 18:00:07.000000000 +0200
@@ -280,7 +280,7 @@
u8 in_guest;
struct list_head runnable_threads;
@@ -1962,9 +1984,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/include/asm/kvm_host.h linux-4.1.6/arch/
struct kvmppc_vcore *vcore;
int ret;
int trap;
-diff -Nur linux-4.1.6.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.6/arch/powerpc/include/asm/thread_info.h
---- linux-4.1.6.orig/arch/powerpc/include/asm/thread_info.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/include/asm/thread_info.h 2015-09-08 23:49:04.810232841 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.10/arch/powerpc/include/asm/thread_info.h
+--- linux-4.1.10.orig/arch/powerpc/include/asm/thread_info.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/include/asm/thread_info.h 2015-10-07 18:00:07.000000000 +0200
@@ -42,6 +42,8 @@
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
@@ -2011,9 +2033,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/include/asm/thread_info.h linux-4.1.6/ar
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
-diff -Nur linux-4.1.6.orig/arch/powerpc/Kconfig linux-4.1.6/arch/powerpc/Kconfig
---- linux-4.1.6.orig/arch/powerpc/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/Kconfig 2015-09-08 23:49:04.810232841 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/Kconfig linux-4.1.10/arch/powerpc/Kconfig
+--- linux-4.1.10.orig/arch/powerpc/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -60,10 +60,11 @@
config RWSEM_GENERIC_SPINLOCK
@@ -2044,9 +2066,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/Kconfig linux-4.1.6/arch/powerpc/Kconfig
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
-diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/asm-offsets.c linux-4.1.6/arch/powerpc/kernel/asm-offsets.c
---- linux-4.1.6.orig/arch/powerpc/kernel/asm-offsets.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kernel/asm-offsets.c 2015-09-08 23:49:04.810232841 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kernel/asm-offsets.c linux-4.1.10/arch/powerpc/kernel/asm-offsets.c
+--- linux-4.1.10.orig/arch/powerpc/kernel/asm-offsets.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kernel/asm-offsets.c 2015-10-07 18:00:07.000000000 +0200
@@ -160,6 +160,7 @@
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -2055,9 +2077,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/asm-offsets.c linux-4.1.6/arch/po
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/entry_32.S linux-4.1.6/arch/powerpc/kernel/entry_32.S
---- linux-4.1.6.orig/arch/powerpc/kernel/entry_32.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kernel/entry_32.S 2015-09-08 23:49:04.810232841 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kernel/entry_32.S linux-4.1.10/arch/powerpc/kernel/entry_32.S
+--- linux-4.1.10.orig/arch/powerpc/kernel/entry_32.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kernel/entry_32.S 2015-10-07 18:00:07.000000000 +0200
@@ -813,7 +813,14 @@
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
@@ -2106,9 +2128,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/entry_32.S linux-4.1.6/arch/power
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
-diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/entry_64.S linux-4.1.6/arch/powerpc/kernel/entry_64.S
---- linux-4.1.6.orig/arch/powerpc/kernel/entry_64.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kernel/entry_64.S 2015-09-08 23:49:04.818231956 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kernel/entry_64.S linux-4.1.10/arch/powerpc/kernel/entry_64.S
+--- linux-4.1.10.orig/arch/powerpc/kernel/entry_64.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kernel/entry_64.S 2015-10-07 18:00:07.000000000 +0200
@@ -636,7 +636,7 @@
#else
beq restore
@@ -2147,9 +2169,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/entry_64.S linux-4.1.6/arch/power
bne 1b
/*
-diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/irq.c linux-4.1.6/arch/powerpc/kernel/irq.c
---- linux-4.1.6.orig/arch/powerpc/kernel/irq.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kernel/irq.c 2015-09-08 23:49:04.818231956 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kernel/irq.c linux-4.1.10/arch/powerpc/kernel/irq.c
+--- linux-4.1.10.orig/arch/powerpc/kernel/irq.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kernel/irq.c 2015-10-07 18:00:07.000000000 +0200
@@ -614,6 +614,7 @@
}
}
@@ -2166,9 +2188,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/irq.c linux-4.1.6/arch/powerpc/ke
irq_hw_number_t virq_to_hw(unsigned int virq)
{
-diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/misc_32.S linux-4.1.6/arch/powerpc/kernel/misc_32.S
---- linux-4.1.6.orig/arch/powerpc/kernel/misc_32.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kernel/misc_32.S 2015-09-08 23:49:04.818231956 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kernel/misc_32.S linux-4.1.10/arch/powerpc/kernel/misc_32.S
+--- linux-4.1.10.orig/arch/powerpc/kernel/misc_32.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kernel/misc_32.S 2015-10-07 18:00:07.000000000 +0200
@@ -40,6 +40,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
@@ -2185,9 +2207,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/misc_32.S linux-4.1.6/arch/powerp
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/misc_64.S linux-4.1.6/arch/powerpc/kernel/misc_64.S
---- linux-4.1.6.orig/arch/powerpc/kernel/misc_64.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kernel/misc_64.S 2015-09-08 23:49:04.822231513 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kernel/misc_64.S linux-4.1.10/arch/powerpc/kernel/misc_64.S
+--- linux-4.1.10.orig/arch/powerpc/kernel/misc_64.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kernel/misc_64.S 2015-10-07 18:00:07.000000000 +0200
@@ -29,6 +29,7 @@
.text
@@ -2204,9 +2226,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kernel/misc_64.S linux-4.1.6/arch/powerp
_GLOBAL(call_do_irq)
mflr r0
-diff -Nur linux-4.1.6.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.6/arch/powerpc/kvm/book3s_hv.c
---- linux-4.1.6.orig/arch/powerpc/kvm/book3s_hv.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kvm/book3s_hv.c 2015-09-08 23:49:04.826231069 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.10/arch/powerpc/kvm/book3s_hv.c
+--- linux-4.1.10.orig/arch/powerpc/kvm/book3s_hv.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kvm/book3s_hv.c 2015-10-07 18:00:07.000000000 +0200
@@ -115,11 +115,11 @@
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
@@ -2282,9 +2304,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kvm/book3s_hv.c linux-4.1.6/arch/powerpc
}
}
-diff -Nur linux-4.1.6.orig/arch/powerpc/kvm/Kconfig linux-4.1.6/arch/powerpc/kvm/Kconfig
---- linux-4.1.6.orig/arch/powerpc/kvm/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/kvm/Kconfig 2015-09-08 23:49:04.822231513 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/kvm/Kconfig linux-4.1.10/arch/powerpc/kvm/Kconfig
+--- linux-4.1.10.orig/arch/powerpc/kvm/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/kvm/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -172,6 +172,7 @@
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
@@ -2293,9 +2315,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/kvm/Kconfig linux-4.1.6/arch/powerpc/kvm
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
-diff -Nur linux-4.1.6.orig/arch/powerpc/mm/fault.c linux-4.1.6/arch/powerpc/mm/fault.c
---- linux-4.1.6.orig/arch/powerpc/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/mm/fault.c 2015-09-08 23:49:05.246184500 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/mm/fault.c linux-4.1.10/arch/powerpc/mm/fault.c
+--- linux-4.1.10.orig/arch/powerpc/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -33,13 +33,13 @@
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
@@ -2331,9 +2353,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/mm/fault.c linux-4.1.6/arch/powerpc/mm/f
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
-diff -Nur linux-4.1.6.orig/arch/powerpc/mm/highmem.c linux-4.1.6/arch/powerpc/mm/highmem.c
---- linux-4.1.6.orig/arch/powerpc/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/mm/highmem.c 2015-09-08 23:49:05.250184055 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/mm/highmem.c linux-4.1.10/arch/powerpc/mm/highmem.c
+--- linux-4.1.10.orig/arch/powerpc/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -34,7 +34,7 @@
unsigned long vaddr;
int idx, type;
@@ -2358,9 +2380,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/mm/highmem.c linux-4.1.6/arch/powerpc/mm
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.1.6/arch/powerpc/platforms/ps3/device-init.c
---- linux-4.1.6.orig/arch/powerpc/platforms/ps3/device-init.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/powerpc/platforms/ps3/device-init.c 2015-09-08 23:49:05.250184055 +0200
+diff -Nur linux-4.1.10.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.1.10/arch/powerpc/platforms/ps3/device-init.c
+--- linux-4.1.10.orig/arch/powerpc/platforms/ps3/device-init.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/powerpc/platforms/ps3/device-init.c 2015-10-07 18:00:07.000000000 +0200
@@ -752,7 +752,7 @@
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
@@ -2370,9 +2392,9 @@ diff -Nur linux-4.1.6.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.1.6/
dev->done.done || kthread_should_stop());
if (kthread_should_stop())
res = -EINTR;
-diff -Nur linux-4.1.6.orig/arch/s390/include/asm/kvm_host.h linux-4.1.6/arch/s390/include/asm/kvm_host.h
---- linux-4.1.6.orig/arch/s390/include/asm/kvm_host.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/s390/include/asm/kvm_host.h 2015-09-08 23:49:05.250184055 +0200
+diff -Nur linux-4.1.10.orig/arch/s390/include/asm/kvm_host.h linux-4.1.10/arch/s390/include/asm/kvm_host.h
+--- linux-4.1.10.orig/arch/s390/include/asm/kvm_host.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/s390/include/asm/kvm_host.h 2015-10-07 18:00:07.000000000 +0200
@@ -419,7 +419,7 @@
struct kvm_s390_local_interrupt {
spinlock_t lock;
@@ -2382,9 +2404,9 @@ diff -Nur linux-4.1.6.orig/arch/s390/include/asm/kvm_host.h linux-4.1.6/arch/s39
atomic_t *cpuflags;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_irq_payload irq;
-diff -Nur linux-4.1.6.orig/arch/s390/include/asm/uaccess.h linux-4.1.6/arch/s390/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/s390/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/s390/include/asm/uaccess.h 2015-09-08 23:49:05.250184055 +0200
+diff -Nur linux-4.1.10.orig/arch/s390/include/asm/uaccess.h linux-4.1.10/arch/s390/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/s390/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/s390/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -98,7 +98,8 @@
* @from: Source address, in user space.
* @n: Number of bytes to copy.
@@ -2435,9 +2457,9 @@ diff -Nur linux-4.1.6.orig/arch/s390/include/asm/uaccess.h linux-4.1.6/arch/s390
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.6.orig/arch/s390/kvm/interrupt.c linux-4.1.6/arch/s390/kvm/interrupt.c
---- linux-4.1.6.orig/arch/s390/kvm/interrupt.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/s390/kvm/interrupt.c 2015-09-08 23:49:05.250184055 +0200
+diff -Nur linux-4.1.10.orig/arch/s390/kvm/interrupt.c linux-4.1.10/arch/s390/kvm/interrupt.c
+--- linux-4.1.10.orig/arch/s390/kvm/interrupt.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/s390/kvm/interrupt.c 2015-10-07 18:00:07.000000000 +0200
@@ -875,13 +875,13 @@
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
@@ -2472,9 +2494,9 @@ diff -Nur linux-4.1.6.orig/arch/s390/kvm/interrupt.c linux-4.1.6/arch/s390/kvm/i
spin_unlock(&li->lock);
return rc;
}
-diff -Nur linux-4.1.6.orig/arch/s390/mm/fault.c linux-4.1.6/arch/s390/mm/fault.c
---- linux-4.1.6.orig/arch/s390/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/s390/mm/fault.c 2015-09-08 23:49:05.254183611 +0200
+diff -Nur linux-4.1.10.orig/arch/s390/mm/fault.c linux-4.1.10/arch/s390/mm/fault.c
+--- linux-4.1.10.orig/arch/s390/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/s390/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -399,7 +399,7 @@
* user context.
*/
@@ -2484,9 +2506,9 @@ diff -Nur linux-4.1.6.orig/arch/s390/mm/fault.c linux-4.1.6/arch/s390/mm/fault.c
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
-diff -Nur linux-4.1.6.orig/arch/score/include/asm/uaccess.h linux-4.1.6/arch/score/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/score/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/score/include/asm/uaccess.h 2015-09-08 23:49:05.254183611 +0200
+diff -Nur linux-4.1.10.orig/arch/score/include/asm/uaccess.h linux-4.1.10/arch/score/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/score/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/score/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -36,7 +36,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -2537,9 +2559,9 @@ diff -Nur linux-4.1.6.orig/arch/score/include/asm/uaccess.h linux-4.1.6/arch/sco
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.6.orig/arch/score/mm/fault.c linux-4.1.6/arch/score/mm/fault.c
---- linux-4.1.6.orig/arch/score/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/score/mm/fault.c 2015-09-08 23:49:05.254183611 +0200
+diff -Nur linux-4.1.10.orig/arch/score/mm/fault.c linux-4.1.10/arch/score/mm/fault.c
+--- linux-4.1.10.orig/arch/score/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/score/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -34,6 +34,7 @@
#include <linux/string.h>
#include <linux/types.h>
@@ -2557,9 +2579,9 @@ diff -Nur linux-4.1.6.orig/arch/score/mm/fault.c linux-4.1.6/arch/score/mm/fault
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/sh/kernel/irq.c linux-4.1.6/arch/sh/kernel/irq.c
---- linux-4.1.6.orig/arch/sh/kernel/irq.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sh/kernel/irq.c 2015-09-08 23:49:05.254183611 +0200
+diff -Nur linux-4.1.10.orig/arch/sh/kernel/irq.c linux-4.1.10/arch/sh/kernel/irq.c
+--- linux-4.1.10.orig/arch/sh/kernel/irq.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sh/kernel/irq.c 2015-10-07 18:00:07.000000000 +0200
@@ -147,6 +147,7 @@
hardirq_ctx[cpu] = NULL;
}
@@ -2576,9 +2598,9 @@ diff -Nur linux-4.1.6.orig/arch/sh/kernel/irq.c linux-4.1.6/arch/sh/kernel/irq.c
#else
static inline void handle_one_irq(unsigned int irq)
{
-diff -Nur linux-4.1.6.orig/arch/sh/mm/fault.c linux-4.1.6/arch/sh/mm/fault.c
---- linux-4.1.6.orig/arch/sh/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sh/mm/fault.c 2015-09-08 23:49:05.258183169 +0200
+diff -Nur linux-4.1.10.orig/arch/sh/mm/fault.c linux-4.1.10/arch/sh/mm/fault.c
+--- linux-4.1.10.orig/arch/sh/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sh/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -17,6 +17,7 @@
#include <linux/kprobes.h>
#include <linux/perf_event.h>
@@ -2599,9 +2621,9 @@ diff -Nur linux-4.1.6.orig/arch/sh/mm/fault.c linux-4.1.6/arch/sh/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-4.1.6.orig/arch/sparc/Kconfig linux-4.1.6/arch/sparc/Kconfig
---- linux-4.1.6.orig/arch/sparc/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sparc/Kconfig 2015-09-08 23:49:05.258183169 +0200
+diff -Nur linux-4.1.10.orig/arch/sparc/Kconfig linux-4.1.10/arch/sparc/Kconfig
+--- linux-4.1.10.orig/arch/sparc/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sparc/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -189,12 +189,10 @@
source kernel/Kconfig.hz
@@ -2617,9 +2639,9 @@ diff -Nur linux-4.1.6.orig/arch/sparc/Kconfig linux-4.1.6/arch/sparc/Kconfig
config GENERIC_HWEIGHT
bool
-diff -Nur linux-4.1.6.orig/arch/sparc/kernel/irq_64.c linux-4.1.6/arch/sparc/kernel/irq_64.c
---- linux-4.1.6.orig/arch/sparc/kernel/irq_64.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sparc/kernel/irq_64.c 2015-09-08 23:49:05.258183169 +0200
+diff -Nur linux-4.1.10.orig/arch/sparc/kernel/irq_64.c linux-4.1.10/arch/sparc/kernel/irq_64.c
+--- linux-4.1.10.orig/arch/sparc/kernel/irq_64.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sparc/kernel/irq_64.c 2015-10-07 18:00:07.000000000 +0200
@@ -849,6 +849,7 @@
set_irq_regs(old_regs);
}
@@ -2636,9 +2658,9 @@ diff -Nur linux-4.1.6.orig/arch/sparc/kernel/irq_64.c linux-4.1.6/arch/sparc/ker
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
-diff -Nur linux-4.1.6.orig/arch/sparc/mm/fault_32.c linux-4.1.6/arch/sparc/mm/fault_32.c
---- linux-4.1.6.orig/arch/sparc/mm/fault_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sparc/mm/fault_32.c 2015-09-08 23:49:05.258183169 +0200
+diff -Nur linux-4.1.10.orig/arch/sparc/mm/fault_32.c linux-4.1.10/arch/sparc/mm/fault_32.c
+--- linux-4.1.10.orig/arch/sparc/mm/fault_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sparc/mm/fault_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -21,6 +21,7 @@
#include <linux/perf_event.h>
#include <linux/interrupt.h>
@@ -2664,9 +2686,9 @@ diff -Nur linux-4.1.6.orig/arch/sparc/mm/fault_32.c linux-4.1.6/arch/sparc/mm/fa
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-4.1.6.orig/arch/sparc/mm/fault_64.c linux-4.1.6/arch/sparc/mm/fault_64.c
---- linux-4.1.6.orig/arch/sparc/mm/fault_64.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sparc/mm/fault_64.c 2015-09-08 23:49:05.262182726 +0200
+diff -Nur linux-4.1.10.orig/arch/sparc/mm/fault_64.c linux-4.1.10/arch/sparc/mm/fault_64.c
+--- linux-4.1.10.orig/arch/sparc/mm/fault_64.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sparc/mm/fault_64.c 2015-10-07 18:00:07.000000000 +0200
@@ -22,12 +22,12 @@
#include <linux/kdebug.h>
#include <linux/percpu.h>
@@ -2690,9 +2712,9 @@ diff -Nur linux-4.1.6.orig/arch/sparc/mm/fault_64.c linux-4.1.6/arch/sparc/mm/fa
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-4.1.6.orig/arch/sparc/mm/highmem.c linux-4.1.6/arch/sparc/mm/highmem.c
---- linux-4.1.6.orig/arch/sparc/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sparc/mm/highmem.c 2015-09-08 23:49:05.262182726 +0200
+diff -Nur linux-4.1.10.orig/arch/sparc/mm/highmem.c linux-4.1.10/arch/sparc/mm/highmem.c
+--- linux-4.1.10.orig/arch/sparc/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sparc/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -53,7 +53,7 @@
unsigned long vaddr;
long idx, type;
@@ -2717,9 +2739,9 @@ diff -Nur linux-4.1.6.orig/arch/sparc/mm/highmem.c linux-4.1.6/arch/sparc/mm/hig
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/sparc/mm/init_64.c linux-4.1.6/arch/sparc/mm/init_64.c
---- linux-4.1.6.orig/arch/sparc/mm/init_64.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/sparc/mm/init_64.c 2015-09-08 23:49:05.262182726 +0200
+diff -Nur linux-4.1.10.orig/arch/sparc/mm/init_64.c linux-4.1.10/arch/sparc/mm/init_64.c
+--- linux-4.1.10.orig/arch/sparc/mm/init_64.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/sparc/mm/init_64.c 2015-10-07 18:00:07.000000000 +0200
@@ -2738,7 +2738,7 @@
struct mm_struct *mm = current->mm;
struct tsb_config *tp;
@@ -2729,9 +2751,9 @@ diff -Nur linux-4.1.6.orig/arch/sparc/mm/init_64.c linux-4.1.6/arch/sparc/mm/ini
const struct exception_table_entry *entry;
entry = search_exception_tables(regs->tpc);
-diff -Nur linux-4.1.6.orig/arch/tile/include/asm/uaccess.h linux-4.1.6/arch/tile/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/tile/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/tile/include/asm/uaccess.h 2015-09-08 23:49:05.266182281 +0200
+diff -Nur linux-4.1.10.orig/arch/tile/include/asm/uaccess.h linux-4.1.10/arch/tile/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/tile/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/tile/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -78,7 +78,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -2792,9 +2814,9 @@ diff -Nur linux-4.1.6.orig/arch/tile/include/asm/uaccess.h linux-4.1.6/arch/tile
*
* Copy data from user space to user space. Caller must check
* the specified blocks with access_ok() before calling this function.
-diff -Nur linux-4.1.6.orig/arch/tile/mm/fault.c linux-4.1.6/arch/tile/mm/fault.c
---- linux-4.1.6.orig/arch/tile/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/tile/mm/fault.c 2015-09-08 23:49:05.266182281 +0200
+diff -Nur linux-4.1.10.orig/arch/tile/mm/fault.c linux-4.1.10/arch/tile/mm/fault.c
+--- linux-4.1.10.orig/arch/tile/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/tile/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -354,9 +354,9 @@
/*
@@ -2807,9 +2829,9 @@ diff -Nur linux-4.1.6.orig/arch/tile/mm/fault.c linux-4.1.6/arch/tile/mm/fault.c
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
-diff -Nur linux-4.1.6.orig/arch/tile/mm/highmem.c linux-4.1.6/arch/tile/mm/highmem.c
---- linux-4.1.6.orig/arch/tile/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/tile/mm/highmem.c 2015-09-08 23:49:05.266182281 +0200
+diff -Nur linux-4.1.10.orig/arch/tile/mm/highmem.c linux-4.1.10/arch/tile/mm/highmem.c
+--- linux-4.1.10.orig/arch/tile/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/tile/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -201,7 +201,7 @@
int idx, type;
pte_t *pte;
@@ -2827,9 +2849,9 @@ diff -Nur linux-4.1.6.orig/arch/tile/mm/highmem.c linux-4.1.6/arch/tile/mm/highm
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/um/kernel/trap.c linux-4.1.6/arch/um/kernel/trap.c
---- linux-4.1.6.orig/arch/um/kernel/trap.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/um/kernel/trap.c 2015-09-08 23:49:05.266182281 +0200
+diff -Nur linux-4.1.10.orig/arch/um/kernel/trap.c linux-4.1.10/arch/um/kernel/trap.c
+--- linux-4.1.10.orig/arch/um/kernel/trap.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/um/kernel/trap.c 2015-10-07 18:00:07.000000000 +0200
@@ -35,10 +35,10 @@
*code_out = SEGV_MAPERR;
@@ -2843,9 +2865,9 @@ diff -Nur linux-4.1.6.orig/arch/um/kernel/trap.c linux-4.1.6/arch/um/kernel/trap
goto out_nosemaphore;
if (is_user)
-diff -Nur linux-4.1.6.orig/arch/unicore32/mm/fault.c linux-4.1.6/arch/unicore32/mm/fault.c
---- linux-4.1.6.orig/arch/unicore32/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/unicore32/mm/fault.c 2015-09-08 23:49:05.266182281 +0200
+diff -Nur linux-4.1.10.orig/arch/unicore32/mm/fault.c linux-4.1.10/arch/unicore32/mm/fault.c
+--- linux-4.1.10.orig/arch/unicore32/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/unicore32/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -218,7 +218,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -2855,9 +2877,9 @@ diff -Nur linux-4.1.6.orig/arch/unicore32/mm/fault.c linux-4.1.6/arch/unicore32/
goto no_context;
if (user_mode(regs))
-diff -Nur linux-4.1.6.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.6/arch/x86/crypto/aesni-intel_glue.c
---- linux-4.1.6.orig/arch/x86/crypto/aesni-intel_glue.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/crypto/aesni-intel_glue.c 2015-09-08 23:49:05.270181837 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.10/arch/x86/crypto/aesni-intel_glue.c
+--- linux-4.1.10.orig/arch/x86/crypto/aesni-intel_glue.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/crypto/aesni-intel_glue.c 2015-10-07 18:00:07.000000000 +0200
@@ -382,14 +382,14 @@
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -2950,9 +2972,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.1.6/arch/x
return err;
}
-diff -Nur linux-4.1.6.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.6/arch/x86/crypto/cast5_avx_glue.c
---- linux-4.1.6.orig/arch/x86/crypto/cast5_avx_glue.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/crypto/cast5_avx_glue.c 2015-09-08 23:49:05.270181837 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.10/arch/x86/crypto/cast5_avx_glue.c
+--- linux-4.1.10.orig/arch/x86/crypto/cast5_avx_glue.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/crypto/cast5_avx_glue.c 2015-10-07 18:00:07.000000000 +0200
@@ -60,7 +60,7 @@
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
@@ -3032,9 +3054,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.1.6/arch/x86
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
-diff -Nur linux-4.1.6.orig/arch/x86/crypto/glue_helper.c linux-4.1.6/arch/x86/crypto/glue_helper.c
---- linux-4.1.6.orig/arch/x86/crypto/glue_helper.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/crypto/glue_helper.c 2015-09-08 23:49:05.270181837 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/crypto/glue_helper.c linux-4.1.10/arch/x86/crypto/glue_helper.c
+--- linux-4.1.10.orig/arch/x86/crypto/glue_helper.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/crypto/glue_helper.c 2015-10-07 18:00:07.000000000 +0200
@@ -39,7 +39,7 @@
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
@@ -3150,9 +3172,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/crypto/glue_helper.c linux-4.1.6/arch/x86/cr
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/preempt.h linux-4.1.6/arch/x86/include/asm/preempt.h
---- linux-4.1.6.orig/arch/x86/include/asm/preempt.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/preempt.h 2015-09-08 23:49:05.270181837 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/preempt.h linux-4.1.10/arch/x86/include/asm/preempt.h
+--- linux-4.1.10.orig/arch/x86/include/asm/preempt.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/preempt.h 2015-10-07 18:00:07.000000000 +0200
@@ -82,17 +82,33 @@
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
@@ -3188,9 +3210,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/preempt.h linux-4.1.6/arch/x86/i
}
#ifdef CONFIG_PREEMPT
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/signal.h linux-4.1.6/arch/x86/include/asm/signal.h
---- linux-4.1.6.orig/arch/x86/include/asm/signal.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/signal.h 2015-09-08 23:49:05.270181837 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/signal.h linux-4.1.10/arch/x86/include/asm/signal.h
+--- linux-4.1.10.orig/arch/x86/include/asm/signal.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/signal.h 2015-10-07 18:00:07.000000000 +0200
@@ -23,6 +23,19 @@
unsigned long sig[_NSIG_WORDS];
} sigset_t;
@@ -3211,9 +3233,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/signal.h linux-4.1.6/arch/x86/in
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/stackprotector.h linux-4.1.6/arch/x86/include/asm/stackprotector.h
---- linux-4.1.6.orig/arch/x86/include/asm/stackprotector.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/stackprotector.h 2015-09-08 23:49:05.298178732 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/stackprotector.h linux-4.1.10/arch/x86/include/asm/stackprotector.h
+--- linux-4.1.10.orig/arch/x86/include/asm/stackprotector.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/stackprotector.h 2015-10-07 18:00:07.000000000 +0200
@@ -57,7 +57,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
@@ -3240,9 +3262,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/stackprotector.h linux-4.1.6/arc
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/thread_info.h linux-4.1.6/arch/x86/include/asm/thread_info.h
---- linux-4.1.6.orig/arch/x86/include/asm/thread_info.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/thread_info.h 2015-09-08 23:49:05.514154783 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/thread_info.h linux-4.1.10/arch/x86/include/asm/thread_info.h
+--- linux-4.1.10.orig/arch/x86/include/asm/thread_info.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/thread_info.h 2015-10-07 18:00:07.000000000 +0200
@@ -55,6 +55,8 @@
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
@@ -3277,9 +3299,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/thread_info.h linux-4.1.6/arch/x
#define STACK_WARN (THREAD_SIZE/8)
/*
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uaccess_32.h linux-4.1.6/arch/x86/include/asm/uaccess_32.h
---- linux-4.1.6.orig/arch/x86/include/asm/uaccess_32.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/uaccess_32.h 2015-09-08 23:49:05.514154783 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/uaccess_32.h linux-4.1.10/arch/x86/include/asm/uaccess_32.h
+--- linux-4.1.10.orig/arch/x86/include/asm/uaccess_32.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/uaccess_32.h 2015-10-07 18:00:07.000000000 +0200
@@ -70,7 +70,8 @@
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
@@ -3300,9 +3322,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uaccess_32.h linux-4.1.6/arch/x8
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uaccess.h linux-4.1.6/arch/x86/include/asm/uaccess.h
---- linux-4.1.6.orig/arch/x86/include/asm/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/uaccess.h 2015-09-08 23:49:05.514154783 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/uaccess.h linux-4.1.10/arch/x86/include/asm/uaccess.h
+--- linux-4.1.10.orig/arch/x86/include/asm/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/uaccess.h 2015-10-07 18:00:07.000000000 +0200
@@ -74,7 +74,8 @@
* @addr: User space pointer to start of block to check
* @size: Size of block to check
@@ -3353,9 +3375,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uaccess.h linux-4.1.6/arch/x86/i
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.6/arch/x86/include/asm/uv/uv_bau.h
---- linux-4.1.6.orig/arch/x86/include/asm/uv/uv_bau.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/uv/uv_bau.h 2015-09-08 23:49:05.514154783 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.10/arch/x86/include/asm/uv/uv_bau.h
+--- linux-4.1.10.orig/arch/x86/include/asm/uv/uv_bau.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/uv/uv_bau.h 2015-10-07 18:00:07.000000000 +0200
@@ -615,9 +615,9 @@
cycles_t send_message;
cycles_t period_end;
@@ -3389,9 +3411,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.1.6/arch/x86
return 1;
}
-diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.1.6/arch/x86/include/asm/uv/uv_hub.h
---- linux-4.1.6.orig/arch/x86/include/asm/uv/uv_hub.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/include/asm/uv/uv_hub.h 2015-09-08 23:49:05.518154339 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.1.10/arch/x86/include/asm/uv/uv_hub.h
+--- linux-4.1.10.orig/arch/x86/include/asm/uv/uv_hub.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/include/asm/uv/uv_hub.h 2015-10-07 18:00:07.000000000 +0200
@@ -492,7 +492,7 @@
unsigned short nr_online_cpus;
unsigned short pnode;
@@ -3401,9 +3423,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.1.6/arch/x86
unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
-diff -Nur linux-4.1.6.orig/arch/x86/Kconfig linux-4.1.6/arch/x86/Kconfig
---- linux-4.1.6.orig/arch/x86/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/Kconfig 2015-09-08 23:49:05.270181837 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/Kconfig linux-4.1.10/arch/x86/Kconfig
+--- linux-4.1.10.orig/arch/x86/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/Kconfig 2015-10-07 18:00:07.000000000 +0200
@@ -22,6 +22,7 @@
### Arch settings
config X86
@@ -3434,9 +3456,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/Kconfig linux-4.1.6/arch/x86/Kconfig
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/apic/io_apic.c linux-4.1.6/arch/x86/kernel/apic/io_apic.c
---- linux-4.1.6.orig/arch/x86/kernel/apic/io_apic.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/apic/io_apic.c 2015-09-08 23:49:05.518154339 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/apic/io_apic.c linux-4.1.10/arch/x86/kernel/apic/io_apic.c
+--- linux-4.1.10.orig/arch/x86/kernel/apic/io_apic.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/apic/io_apic.c 2015-10-07 18:00:07.000000000 +0200
@@ -1891,7 +1891,8 @@
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
@@ -3447,9 +3469,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/apic/io_apic.c linux-4.1.6/arch/x86/k
mask_ioapic(cfg);
return true;
}
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.1.6/arch/x86/kernel/apic/x2apic_uv_x.c
---- linux-4.1.6.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/apic/x2apic_uv_x.c 2015-09-08 23:49:05.518154339 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.1.10/arch/x86/kernel/apic/x2apic_uv_x.c
+--- linux-4.1.10.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/apic/x2apic_uv_x.c 2015-10-07 18:00:07.000000000 +0200
@@ -949,7 +949,7 @@
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
@@ -3459,9 +3481,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.1.6/arch/x
min_pnode = min(pnode, min_pnode);
max_pnode = max(pnode, max_pnode);
blade++;
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/asm-offsets.c linux-4.1.6/arch/x86/kernel/asm-offsets.c
---- linux-4.1.6.orig/arch/x86/kernel/asm-offsets.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/asm-offsets.c 2015-09-08 23:49:05.522153895 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/asm-offsets.c linux-4.1.10/arch/x86/kernel/asm-offsets.c
+--- linux-4.1.10.orig/arch/x86/kernel/asm-offsets.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/asm-offsets.c 2015-10-07 18:00:07.000000000 +0200
@@ -32,6 +32,7 @@
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
@@ -3476,9 +3498,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/asm-offsets.c linux-4.1.6/arch/x86/ke
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.6/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-4.1.6.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/cpu/mcheck/mce.c 2015-09-08 23:49:05.522153895 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.10/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-4.1.10.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/cpu/mcheck/mce.c 2015-10-07 18:00:07.000000000 +0200
@@ -41,6 +41,8 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
@@ -3720,9 +3742,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.1.6/arch/x86
if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
err = -ENOMEM;
goto err_out;
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.6/arch/x86/kernel/dumpstack_32.c
---- linux-4.1.6.orig/arch/x86/kernel/dumpstack_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/dumpstack_32.c 2015-09-08 23:49:05.526153452 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.10/arch/x86/kernel/dumpstack_32.c
+--- linux-4.1.10.orig/arch/x86/kernel/dumpstack_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/dumpstack_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -42,7 +42,7 @@
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
@@ -3741,9 +3763,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/dumpstack_32.c linux-4.1.6/arch/x86/k
}
EXPORT_SYMBOL(dump_trace);
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.6/arch/x86/kernel/dumpstack_64.c
---- linux-4.1.6.orig/arch/x86/kernel/dumpstack_64.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/dumpstack_64.c 2015-09-08 23:49:05.526153452 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.10/arch/x86/kernel/dumpstack_64.c
+--- linux-4.1.10.orig/arch/x86/kernel/dumpstack_64.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/dumpstack_64.c 2015-10-07 18:00:07.000000000 +0200
@@ -152,7 +152,7 @@
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data)
@@ -3780,9 +3802,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/dumpstack_64.c linux-4.1.6/arch/x86/k
pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/entry_32.S linux-4.1.6/arch/x86/kernel/entry_32.S
---- linux-4.1.6.orig/arch/x86/kernel/entry_32.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/entry_32.S 2015-09-08 23:49:05.526153452 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/entry_32.S linux-4.1.10/arch/x86/kernel/entry_32.S
+--- linux-4.1.10.orig/arch/x86/kernel/entry_32.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/entry_32.S 2015-10-07 18:00:07.000000000 +0200
@@ -359,8 +359,24 @@
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -3826,9 +3848,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/entry_32.S linux-4.1.6/arch/x86/kerne
jnz work_resched
work_notifysig: # deal with pending signals and
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/entry_64.S linux-4.1.6/arch/x86/kernel/entry_64.S
---- linux-4.1.6.orig/arch/x86/kernel/entry_64.S 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/entry_64.S 2015-09-08 23:49:05.530153010 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/entry_64.S linux-4.1.10/arch/x86/kernel/entry_64.S
+--- linux-4.1.10.orig/arch/x86/kernel/entry_64.S 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/entry_64.S 2015-10-07 18:00:07.000000000 +0200
@@ -370,8 +370,8 @@
/* First do a reschedule test. */
/* edx: work, edi: workmask */
@@ -3891,9 +3913,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/entry_64.S linux-4.1.6/arch/x86/kerne
#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/irq_32.c linux-4.1.6/arch/x86/kernel/irq_32.c
---- linux-4.1.6.orig/arch/x86/kernel/irq_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/irq_32.c 2015-09-08 23:49:05.530153010 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/irq_32.c linux-4.1.10/arch/x86/kernel/irq_32.c
+--- linux-4.1.10.orig/arch/x86/kernel/irq_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/irq_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -135,6 +135,7 @@
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
@@ -3910,9 +3932,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/irq_32.c linux-4.1.6/arch/x86/kernel/
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/process_32.c linux-4.1.6/arch/x86/kernel/process_32.c
---- linux-4.1.6.orig/arch/x86/kernel/process_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/process_32.c 2015-09-08 23:49:05.530153010 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/process_32.c linux-4.1.10/arch/x86/kernel/process_32.c
+--- linux-4.1.10.orig/arch/x86/kernel/process_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/process_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -3966,10 +3988,10 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/process_32.c linux-4.1.6/arch/x86/ker
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/signal.c linux-4.1.6/arch/x86/kernel/signal.c
---- linux-4.1.6.orig/arch/x86/kernel/signal.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/signal.c 2015-09-08 23:49:05.534152567 +0200
-@@ -727,6 +727,14 @@
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/signal.c linux-4.1.10/arch/x86/kernel/signal.c
+--- linux-4.1.10.orig/arch/x86/kernel/signal.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/signal.c 2015-10-07 18:00:07.000000000 +0200
+@@ -723,6 +723,14 @@
{
user_exit();
@@ -3984,9 +4006,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/signal.c linux-4.1.6/arch/x86/kernel/
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
-diff -Nur linux-4.1.6.orig/arch/x86/kernel/traps.c linux-4.1.6/arch/x86/kernel/traps.c
---- linux-4.1.6.orig/arch/x86/kernel/traps.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kernel/traps.c 2015-09-08 23:49:05.534152567 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kernel/traps.c linux-4.1.10/arch/x86/kernel/traps.c
+--- linux-4.1.10.orig/arch/x86/kernel/traps.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kernel/traps.c 2015-10-07 18:00:07.000000000 +0200
@@ -88,9 +88,21 @@
local_irq_enable();
}
@@ -4061,9 +4083,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kernel/traps.c linux-4.1.6/arch/x86/kernel/t
debug_stack_usage_dec();
exit:
-diff -Nur linux-4.1.6.orig/arch/x86/kvm/lapic.c linux-4.1.6/arch/x86/kvm/lapic.c
---- linux-4.1.6.orig/arch/x86/kvm/lapic.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kvm/lapic.c 2015-09-08 23:49:05.534152567 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kvm/lapic.c linux-4.1.10/arch/x86/kvm/lapic.c
+--- linux-4.1.10.orig/arch/x86/kvm/lapic.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kvm/lapic.c 2015-10-07 18:00:07.000000000 +0200
@@ -1104,7 +1104,7 @@
static void apic_timer_expired(struct kvm_lapic *apic)
{
@@ -4164,9 +4186,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kvm/lapic.c linux-4.1.6/arch/x86/kvm/lapic.c
}
/*
-diff -Nur linux-4.1.6.orig/arch/x86/kvm/x86.c linux-4.1.6/arch/x86/kvm/x86.c
---- linux-4.1.6.orig/arch/x86/kvm/x86.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/kvm/x86.c 2015-09-08 23:49:05.538152123 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/kvm/x86.c linux-4.1.10/arch/x86/kvm/x86.c
+--- linux-4.1.10.orig/arch/x86/kvm/x86.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/kvm/x86.c 2015-10-07 18:00:07.000000000 +0200
@@ -5813,6 +5813,13 @@
goto out;
}
@@ -4181,9 +4203,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/kvm/x86.c linux-4.1.6/arch/x86/kvm/x86.c
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
-diff -Nur linux-4.1.6.orig/arch/x86/lib/usercopy_32.c linux-4.1.6/arch/x86/lib/usercopy_32.c
---- linux-4.1.6.orig/arch/x86/lib/usercopy_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/lib/usercopy_32.c 2015-09-08 23:49:05.754128173 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/lib/usercopy_32.c linux-4.1.10/arch/x86/lib/usercopy_32.c
+--- linux-4.1.10.orig/arch/x86/lib/usercopy_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/lib/usercopy_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -647,7 +647,8 @@
* @from: Source address, in kernel space.
* @n: Number of bytes to copy.
@@ -4204,9 +4226,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/lib/usercopy_32.c linux-4.1.6/arch/x86/lib/u
*
* Copy data from user space to kernel space.
*
-diff -Nur linux-4.1.6.orig/arch/x86/mm/fault.c linux-4.1.6/arch/x86/mm/fault.c
---- linux-4.1.6.orig/arch/x86/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/mm/fault.c 2015-09-08 23:49:05.754128173 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/mm/fault.c linux-4.1.10/arch/x86/mm/fault.c
+--- linux-4.1.10.orig/arch/x86/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -13,6 +13,7 @@
#include <linux/hugetlb.h> /* hstate_index_to_shift */
#include <linux/prefetch.h> /* prefetchw */
@@ -4227,9 +4249,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/mm/fault.c linux-4.1.6/arch/x86/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-4.1.6.orig/arch/x86/mm/highmem_32.c linux-4.1.6/arch/x86/mm/highmem_32.c
---- linux-4.1.6.orig/arch/x86/mm/highmem_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/mm/highmem_32.c 2015-09-08 23:49:05.754128173 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/mm/highmem_32.c linux-4.1.10/arch/x86/mm/highmem_32.c
+--- linux-4.1.10.orig/arch/x86/mm/highmem_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/mm/highmem_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -32,10 +32,11 @@
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
@@ -4273,9 +4295,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/mm/highmem_32.c linux-4.1.6/arch/x86/mm/high
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/x86/mm/iomap_32.c linux-4.1.6/arch/x86/mm/iomap_32.c
---- linux-4.1.6.orig/arch/x86/mm/iomap_32.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/mm/iomap_32.c 2015-09-08 23:49:05.754128173 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/mm/iomap_32.c linux-4.1.10/arch/x86/mm/iomap_32.c
+--- linux-4.1.10.orig/arch/x86/mm/iomap_32.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/mm/iomap_32.c 2015-10-07 18:00:07.000000000 +0200
@@ -56,15 +56,22 @@
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
@@ -4315,9 +4337,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/mm/iomap_32.c linux-4.1.6/arch/x86/mm/iomap_
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
-diff -Nur linux-4.1.6.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.6/arch/x86/platform/uv/tlb_uv.c
---- linux-4.1.6.orig/arch/x86/platform/uv/tlb_uv.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/platform/uv/tlb_uv.c 2015-09-08 23:49:05.758127730 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.10/arch/x86/platform/uv/tlb_uv.c
+--- linux-4.1.10.orig/arch/x86/platform/uv/tlb_uv.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/platform/uv/tlb_uv.c 2015-10-07 18:00:07.000000000 +0200
@@ -714,9 +714,9 @@
quiesce_local_uvhub(hmaster);
@@ -4404,9 +4426,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/platform/uv/tlb_uv.c linux-4.1.6/arch/x86/pl
}
}
-diff -Nur linux-4.1.6.orig/arch/x86/platform/uv/uv_time.c linux-4.1.6/arch/x86/platform/uv/uv_time.c
---- linux-4.1.6.orig/arch/x86/platform/uv/uv_time.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/x86/platform/uv/uv_time.c 2015-09-08 23:49:05.758127730 +0200
+diff -Nur linux-4.1.10.orig/arch/x86/platform/uv/uv_time.c linux-4.1.10/arch/x86/platform/uv/uv_time.c
+--- linux-4.1.10.orig/arch/x86/platform/uv/uv_time.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/x86/platform/uv/uv_time.c 2015-10-07 18:00:07.000000000 +0200
@@ -58,7 +58,7 @@
/* There is one of these allocated per node */
@@ -4487,9 +4509,9 @@ diff -Nur linux-4.1.6.orig/arch/x86/platform/uv/uv_time.c linux-4.1.6/arch/x86/p
}
/*
-diff -Nur linux-4.1.6.orig/arch/xtensa/mm/fault.c linux-4.1.6/arch/xtensa/mm/fault.c
---- linux-4.1.6.orig/arch/xtensa/mm/fault.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/xtensa/mm/fault.c 2015-09-08 23:49:05.758127730 +0200
+diff -Nur linux-4.1.10.orig/arch/xtensa/mm/fault.c linux-4.1.10/arch/xtensa/mm/fault.c
+--- linux-4.1.10.orig/arch/xtensa/mm/fault.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/xtensa/mm/fault.c 2015-10-07 18:00:07.000000000 +0200
@@ -15,10 +15,10 @@
#include <linux/mm.h>
#include <linux/module.h>
@@ -4511,9 +4533,9 @@ diff -Nur linux-4.1.6.orig/arch/xtensa/mm/fault.c linux-4.1.6/arch/xtensa/mm/fau
bad_page_fault(regs, address, SIGSEGV);
return;
}
-diff -Nur linux-4.1.6.orig/arch/xtensa/mm/highmem.c linux-4.1.6/arch/xtensa/mm/highmem.c
---- linux-4.1.6.orig/arch/xtensa/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/arch/xtensa/mm/highmem.c 2015-09-08 23:49:05.758127730 +0200
+diff -Nur linux-4.1.10.orig/arch/xtensa/mm/highmem.c linux-4.1.10/arch/xtensa/mm/highmem.c
+--- linux-4.1.10.orig/arch/xtensa/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/arch/xtensa/mm/highmem.c 2015-10-07 18:00:07.000000000 +0200
@@ -42,6 +42,7 @@
enum fixed_addresses idx;
unsigned long vaddr;
@@ -4530,9 +4552,9 @@ diff -Nur linux-4.1.6.orig/arch/xtensa/mm/highmem.c linux-4.1.6/arch/xtensa/mm/h
}
EXPORT_SYMBOL(__kunmap_atomic);
-diff -Nur linux-4.1.6.orig/block/blk-core.c linux-4.1.6/block/blk-core.c
---- linux-4.1.6.orig/block/blk-core.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-core.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-core.c linux-4.1.10/block/blk-core.c
+--- linux-4.1.10.orig/block/blk-core.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-core.c 2015-10-07 18:00:07.000000000 +0200
@@ -100,6 +100,9 @@
INIT_LIST_HEAD(&rq->queuelist);
@@ -4608,9 +4630,9 @@ diff -Nur linux-4.1.6.orig/block/blk-core.c linux-4.1.6/block/blk-core.c
}
void blk_finish_plug(struct blk_plug *plug)
-diff -Nur linux-4.1.6.orig/block/blk-ioc.c linux-4.1.6/block/blk-ioc.c
---- linux-4.1.6.orig/block/blk-ioc.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-ioc.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-ioc.c linux-4.1.10/block/blk-ioc.c
+--- linux-4.1.10.orig/block/blk-ioc.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-ioc.c 2015-10-07 18:00:07.000000000 +0200
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -4637,9 +4659,9 @@ diff -Nur linux-4.1.6.orig/block/blk-ioc.c linux-4.1.6/block/blk-ioc.c
goto retry;
}
}
-diff -Nur linux-4.1.6.orig/block/blk-iopoll.c linux-4.1.6/block/blk-iopoll.c
---- linux-4.1.6.orig/block/blk-iopoll.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-iopoll.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-iopoll.c linux-4.1.10/block/blk-iopoll.c
+--- linux-4.1.10.orig/block/blk-iopoll.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-iopoll.c 2015-10-07 18:00:07.000000000 +0200
@@ -35,6 +35,7 @@
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
@@ -4664,9 +4686,9 @@ diff -Nur linux-4.1.6.orig/block/blk-iopoll.c linux-4.1.6/block/blk-iopoll.c
}
return NOTIFY_OK;
-diff -Nur linux-4.1.6.orig/block/blk-mq.c linux-4.1.6/block/blk-mq.c
---- linux-4.1.6.orig/block/blk-mq.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-mq.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-mq.c linux-4.1.10/block/blk-mq.c
+--- linux-4.1.10.orig/block/blk-mq.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-mq.c 2015-10-07 18:00:07.000000000 +0200
@@ -88,7 +88,7 @@
if (!(gfp & __GFP_WAIT))
return -EBUSY;
@@ -4802,9 +4824,9 @@ diff -Nur linux-4.1.6.orig/block/blk-mq.c linux-4.1.6/block/blk-mq.c
return blk_mq_hctx_cpu_offline(hctx, cpu);
/*
-diff -Nur linux-4.1.6.orig/block/blk-mq-cpu.c linux-4.1.6/block/blk-mq-cpu.c
---- linux-4.1.6.orig/block/blk-mq-cpu.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-mq-cpu.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-mq-cpu.c linux-4.1.10/block/blk-mq-cpu.c
+--- linux-4.1.10.orig/block/blk-mq-cpu.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-mq-cpu.c 2015-10-07 18:00:07.000000000 +0200
@@ -16,7 +16,7 @@
#include "blk-mq.h"
@@ -4856,9 +4878,9 @@ diff -Nur linux-4.1.6.orig/block/blk-mq-cpu.c linux-4.1.6/block/blk-mq-cpu.c
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
-diff -Nur linux-4.1.6.orig/block/blk-mq.h linux-4.1.6/block/blk-mq.h
---- linux-4.1.6.orig/block/blk-mq.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-mq.h 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-mq.h linux-4.1.10/block/blk-mq.h
+--- linux-4.1.10.orig/block/blk-mq.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-mq.h 2015-10-07 18:00:07.000000000 +0200
@@ -76,7 +76,10 @@
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
@@ -4886,9 +4908,9 @@ diff -Nur linux-4.1.6.orig/block/blk-mq.h linux-4.1.6/block/blk-mq.h
}
struct blk_mq_alloc_data {
-diff -Nur linux-4.1.6.orig/block/blk-softirq.c linux-4.1.6/block/blk-softirq.c
---- linux-4.1.6.orig/block/blk-softirq.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/blk-softirq.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/blk-softirq.c linux-4.1.10/block/blk-softirq.c
+--- linux-4.1.10.orig/block/blk-softirq.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/blk-softirq.c 2015-10-07 18:00:07.000000000 +0200
@@ -51,6 +51,7 @@
raise_softirq_irqoff(BLOCK_SOFTIRQ);
@@ -4913,9 +4935,9 @@ diff -Nur linux-4.1.6.orig/block/blk-softirq.c linux-4.1.6/block/blk-softirq.c
}
/**
-diff -Nur linux-4.1.6.orig/block/bounce.c linux-4.1.6/block/bounce.c
---- linux-4.1.6.orig/block/bounce.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/block/bounce.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/block/bounce.c linux-4.1.10/block/bounce.c
+--- linux-4.1.10.orig/block/bounce.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/block/bounce.c 2015-10-07 18:00:07.000000000 +0200
@@ -54,11 +54,11 @@
unsigned long flags;
unsigned char *vto;
@@ -4930,9 +4952,9 @@ diff -Nur linux-4.1.6.orig/block/bounce.c linux-4.1.6/block/bounce.c
}
#else /* CONFIG_HIGHMEM */
-diff -Nur linux-4.1.6.orig/crypto/algapi.c linux-4.1.6/crypto/algapi.c
---- linux-4.1.6.orig/crypto/algapi.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/crypto/algapi.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/crypto/algapi.c linux-4.1.10/crypto/algapi.c
+--- linux-4.1.10.orig/crypto/algapi.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/crypto/algapi.c 2015-10-07 18:00:07.000000000 +0200
@@ -695,13 +695,13 @@
int crypto_register_notifier(struct notifier_block *nb)
@@ -4949,9 +4971,9 @@ diff -Nur linux-4.1.6.orig/crypto/algapi.c linux-4.1.6/crypto/algapi.c
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-diff -Nur linux-4.1.6.orig/crypto/api.c linux-4.1.6/crypto/api.c
---- linux-4.1.6.orig/crypto/api.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/crypto/api.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/crypto/api.c linux-4.1.10/crypto/api.c
+--- linux-4.1.10.orig/crypto/api.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/crypto/api.c 2015-10-07 18:00:07.000000000 +0200
@@ -31,7 +31,7 @@
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
@@ -4974,9 +4996,9 @@ diff -Nur linux-4.1.6.orig/crypto/api.c linux-4.1.6/crypto/api.c
}
return ok;
-diff -Nur linux-4.1.6.orig/crypto/internal.h linux-4.1.6/crypto/internal.h
---- linux-4.1.6.orig/crypto/internal.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/crypto/internal.h 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/crypto/internal.h linux-4.1.10/crypto/internal.h
+--- linux-4.1.10.orig/crypto/internal.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/crypto/internal.h 2015-10-07 18:00:07.000000000 +0200
@@ -48,7 +48,7 @@
extern struct list_head crypto_alg_list;
@@ -4995,9 +5017,9 @@ diff -Nur linux-4.1.6.orig/crypto/internal.h linux-4.1.6/crypto/internal.h
}
#endif /* _CRYPTO_INTERNAL_H */
-diff -Nur linux-4.1.6.orig/Documentation/hwlat_detector.txt linux-4.1.6/Documentation/hwlat_detector.txt
---- linux-4.1.6.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/Documentation/hwlat_detector.txt 2015-09-08 23:49:03.494378758 +0200
+diff -Nur linux-4.1.10.orig/Documentation/hwlat_detector.txt linux-4.1.10/Documentation/hwlat_detector.txt
+--- linux-4.1.10.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/Documentation/hwlat_detector.txt 2015-10-07 18:00:07.000000000 +0200
@@ -0,0 +1,64 @@
+Introduction:
+-------------
@@ -5063,9 +5085,9 @@ diff -Nur linux-4.1.6.orig/Documentation/hwlat_detector.txt linux-4.1.6/Document
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
-diff -Nur linux-4.1.6.orig/Documentation/sysrq.txt linux-4.1.6/Documentation/sysrq.txt
---- linux-4.1.6.orig/Documentation/sysrq.txt 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/Documentation/sysrq.txt 2015-09-08 23:49:03.494378758 +0200
+diff -Nur linux-4.1.10.orig/Documentation/sysrq.txt linux-4.1.10/Documentation/sysrq.txt
+--- linux-4.1.10.orig/Documentation/sysrq.txt 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/Documentation/sysrq.txt 2015-10-07 18:00:07.000000000 +0200
@@ -59,10 +59,17 @@
On other - If you know of the key combos for other architectures, please
let me know so I can add them to this section.
@@ -5086,9 +5108,9 @@ diff -Nur linux-4.1.6.orig/Documentation/sysrq.txt linux-4.1.6/Documentation/sys
* What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'b' - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-4.1.6.orig/Documentation/trace/histograms.txt linux-4.1.6/Documentation/trace/histograms.txt
---- linux-4.1.6.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/Documentation/trace/histograms.txt 2015-09-08 23:49:03.498378314 +0200
+diff -Nur linux-4.1.10.orig/Documentation/trace/histograms.txt linux-4.1.10/Documentation/trace/histograms.txt
+--- linux-4.1.10.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/Documentation/trace/histograms.txt 2015-10-07 18:00:07.000000000 +0200
@@ -0,0 +1,186 @@
+ Using the Linux Kernel Latency Histograms
+
@@ -5276,9 +5298,9 @@ diff -Nur linux-4.1.6.orig/Documentation/trace/histograms.txt linux-4.1.6/Docume
+is provided.
+
+These data are also reset when the wakeup histogram is reset.
-diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/acglobal.h linux-4.1.6/drivers/acpi/acpica/acglobal.h
---- linux-4.1.6.orig/drivers/acpi/acpica/acglobal.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/acpi/acpica/acglobal.h 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/drivers/acpi/acpica/acglobal.h linux-4.1.10/drivers/acpi/acpica/acglobal.h
+--- linux-4.1.10.orig/drivers/acpi/acpica/acglobal.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/acpi/acpica/acglobal.h 2015-10-07 18:00:07.000000000 +0200
@@ -112,7 +112,7 @@
* interrupt level
*/
@@ -5288,9 +5310,9 @@ diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/acglobal.h linux-4.1.6/drivers/ac
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/hwregs.c linux-4.1.6/drivers/acpi/acpica/hwregs.c
---- linux-4.1.6.orig/drivers/acpi/acpica/hwregs.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/acpi/acpica/hwregs.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/drivers/acpi/acpica/hwregs.c linux-4.1.10/drivers/acpi/acpica/hwregs.c
+--- linux-4.1.10.orig/drivers/acpi/acpica/hwregs.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/acpi/acpica/hwregs.c 2015-10-07 18:00:07.000000000 +0200
@@ -269,14 +269,14 @@
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
@@ -5308,9 +5330,9 @@ diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/hwregs.c linux-4.1.6/drivers/acpi
if (ACPI_FAILURE(status)) {
goto exit;
-diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/hwxface.c linux-4.1.6/drivers/acpi/acpica/hwxface.c
---- linux-4.1.6.orig/drivers/acpi/acpica/hwxface.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/acpi/acpica/hwxface.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/drivers/acpi/acpica/hwxface.c linux-4.1.10/drivers/acpi/acpica/hwxface.c
+--- linux-4.1.10.orig/drivers/acpi/acpica/hwxface.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/acpi/acpica/hwxface.c 2015-10-07 18:00:07.000000000 +0200
@@ -374,7 +374,7 @@
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -5329,9 +5351,9 @@ diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/hwxface.c linux-4.1.6/drivers/acp
return_ACPI_STATUS(status);
}
-diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/utmutex.c linux-4.1.6/drivers/acpi/acpica/utmutex.c
---- linux-4.1.6.orig/drivers/acpi/acpica/utmutex.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/acpi/acpica/utmutex.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/drivers/acpi/acpica/utmutex.c linux-4.1.10/drivers/acpi/acpica/utmutex.c
+--- linux-4.1.10.orig/drivers/acpi/acpica/utmutex.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/acpi/acpica/utmutex.c 2015-10-07 18:00:07.000000000 +0200
@@ -88,7 +88,7 @@
return_ACPI_STATUS (status);
}
@@ -5350,9 +5372,9 @@ diff -Nur linux-4.1.6.orig/drivers/acpi/acpica/utmutex.c linux-4.1.6/drivers/acp
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
-diff -Nur linux-4.1.6.orig/drivers/ata/libata-sff.c linux-4.1.6/drivers/ata/libata-sff.c
---- linux-4.1.6.orig/drivers/ata/libata-sff.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ata/libata-sff.c 2015-09-08 23:49:05.762127285 +0200
+diff -Nur linux-4.1.10.orig/drivers/ata/libata-sff.c linux-4.1.10/drivers/ata/libata-sff.c
+--- linux-4.1.10.orig/drivers/ata/libata-sff.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ata/libata-sff.c 2015-10-07 18:00:07.000000000 +0200
@@ -678,9 +678,9 @@
unsigned long flags;
unsigned int consumed;
@@ -5401,9 +5423,9 @@ diff -Nur linux-4.1.6.orig/drivers/ata/libata-sff.c linux-4.1.6/drivers/ata/liba
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
-diff -Nur linux-4.1.6.orig/drivers/char/random.c linux-4.1.6/drivers/char/random.c
---- linux-4.1.6.orig/drivers/char/random.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/char/random.c 2015-09-08 23:49:05.766126841 +0200
+diff -Nur linux-4.1.10.orig/drivers/char/random.c linux-4.1.10/drivers/char/random.c
+--- linux-4.1.10.orig/drivers/char/random.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/char/random.c 2015-10-07 18:00:07.000000000 +0200
@@ -776,8 +776,6 @@
} sample;
long delta, delta2, delta3;
@@ -5455,9 +5477,9 @@ diff -Nur linux-4.1.6.orig/drivers/char/random.c linux-4.1.6/drivers/char/random
fast_mix(fast_pool);
add_interrupt_bench(cycles);
-diff -Nur linux-4.1.6.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.6/drivers/clocksource/tcb_clksrc.c
---- linux-4.1.6.orig/drivers/clocksource/tcb_clksrc.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/clocksource/tcb_clksrc.c 2015-09-08 23:49:05.766126841 +0200
+diff -Nur linux-4.1.10.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.10/drivers/clocksource/tcb_clksrc.c
+--- linux-4.1.10.orig/drivers/clocksource/tcb_clksrc.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/clocksource/tcb_clksrc.c 2015-10-07 18:00:08.000000000 +0200
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
@@ -5572,9 +5594,9 @@ diff -Nur linux-4.1.6.orig/drivers/clocksource/tcb_clksrc.c linux-4.1.6/drivers/
if (ret)
goto err_unregister_clksrc;
-diff -Nur linux-4.1.6.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.6/drivers/clocksource/timer-atmel-pit.c
---- linux-4.1.6.orig/drivers/clocksource/timer-atmel-pit.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/clocksource/timer-atmel-pit.c 2015-09-08 23:49:05.766126841 +0200
+diff -Nur linux-4.1.10.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.10/drivers/clocksource/timer-atmel-pit.c
+--- linux-4.1.10.orig/drivers/clocksource/timer-atmel-pit.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/clocksource/timer-atmel-pit.c 2015-10-07 18:00:08.000000000 +0200
@@ -90,6 +90,7 @@
return elapsed;
}
@@ -5600,9 +5622,9 @@ diff -Nur linux-4.1.6.orig/drivers/clocksource/timer-atmel-pit.c linux-4.1.6/dri
break;
case CLOCK_EVT_MODE_RESUME:
break;
-diff -Nur linux-4.1.6.orig/drivers/clocksource/timer-atmel-st.c linux-4.1.6/drivers/clocksource/timer-atmel-st.c
---- linux-4.1.6.orig/drivers/clocksource/timer-atmel-st.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/clocksource/timer-atmel-st.c 2015-09-08 23:49:05.766126841 +0200
+diff -Nur linux-4.1.10.orig/drivers/clocksource/timer-atmel-st.c linux-4.1.10/drivers/clocksource/timer-atmel-st.c
+--- linux-4.1.10.orig/drivers/clocksource/timer-atmel-st.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/clocksource/timer-atmel-st.c 2015-10-07 18:00:08.000000000 +0200
@@ -131,6 +131,7 @@
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -5611,9 +5633,9 @@ diff -Nur linux-4.1.6.orig/drivers/clocksource/timer-atmel-st.c linux-4.1.6/driv
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
-diff -Nur linux-4.1.6.orig/drivers/cpufreq/cpufreq.c linux-4.1.6/drivers/cpufreq/cpufreq.c
---- linux-4.1.6.orig/drivers/cpufreq/cpufreq.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/cpufreq/cpufreq.c 2015-09-08 23:49:05.818121077 +0200
+diff -Nur linux-4.1.10.orig/drivers/cpufreq/cpufreq.c linux-4.1.10/drivers/cpufreq/cpufreq.c
+--- linux-4.1.10.orig/drivers/cpufreq/cpufreq.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/cpufreq/cpufreq.c 2015-10-07 18:00:08.000000000 +0200
@@ -64,12 +64,6 @@
return cpufreq_driver->target_index || cpufreq_driver->target;
}
@@ -5750,9 +5772,9 @@ diff -Nur linux-4.1.6.orig/drivers/cpufreq/cpufreq.c linux-4.1.6/drivers/cpufreq
return 0;
}
-diff -Nur linux-4.1.6.orig/drivers/cpufreq/Kconfig.x86 linux-4.1.6/drivers/cpufreq/Kconfig.x86
---- linux-4.1.6.orig/drivers/cpufreq/Kconfig.x86 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/cpufreq/Kconfig.x86 2015-09-08 23:49:05.818121077 +0200
+diff -Nur linux-4.1.10.orig/drivers/cpufreq/Kconfig.x86 linux-4.1.10/drivers/cpufreq/Kconfig.x86
+--- linux-4.1.10.orig/drivers/cpufreq/Kconfig.x86 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/cpufreq/Kconfig.x86 2015-10-07 18:00:08.000000000 +0200
@@ -123,7 +123,7 @@
config X86_POWERNOW_K8
@@ -5762,9 +5784,9 @@ diff -Nur linux-4.1.6.orig/drivers/cpufreq/Kconfig.x86 linux-4.1.6/drivers/cpufr
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
Support for K10 and newer processors is now in acpi-cpufreq.
-diff -Nur linux-4.1.6.orig/drivers/gpio/gpio-omap.c linux-4.1.6/drivers/gpio/gpio-omap.c
---- linux-4.1.6.orig/drivers/gpio/gpio-omap.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/gpio/gpio-omap.c 2015-09-08 23:49:05.818121077 +0200
+diff -Nur linux-4.1.10.orig/drivers/gpio/gpio-omap.c linux-4.1.10/drivers/gpio/gpio-omap.c
+--- linux-4.1.10.orig/drivers/gpio/gpio-omap.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/gpio/gpio-omap.c 2015-10-07 18:00:08.000000000 +0200
@@ -57,7 +57,7 @@
u32 saved_datain;
u32 level_mask;
@@ -6045,9 +6067,9 @@ diff -Nur linux-4.1.6.orig/drivers/gpio/gpio-omap.c linux-4.1.6/drivers/gpio/gpi
return 0;
}
-diff -Nur linux-4.1.6.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1.6/drivers/gpu/drm/i915/i915_gem_execbuffer.c
---- linux-4.1.6.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-09-08 23:49:05.818121077 +0200
+diff -Nur linux-4.1.10.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1.10/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+--- linux-4.1.10.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-10-07 18:00:08.000000000 +0200
@@ -32,6 +32,7 @@
#include "i915_trace.h"
#include "intel_drv.h"
@@ -6075,9 +6097,9 @@ diff -Nur linux-4.1.6.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.1.
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-diff -Nur linux-4.1.6.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.1.6/drivers/gpu/drm/i915/i915_gem_shrinker.c
---- linux-4.1.6.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/gpu/drm/i915/i915_gem_shrinker.c 2015-09-08 23:49:05.818121077 +0200
+diff -Nur linux-4.1.10.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.1.10/drivers/gpu/drm/i915/i915_gem_shrinker.c
+--- linux-4.1.10.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/gpu/drm/i915/i915_gem_shrinker.c 2015-10-07 18:00:08.000000000 +0200
@@ -39,7 +39,7 @@
if (!mutex_is_locked(mutex))
return false;
@@ -6087,9 +6109,9 @@ diff -Nur linux-4.1.6.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.1.6/
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
-diff -Nur linux-4.1.6.orig/drivers/gpu/drm/i915/intel_display.c linux-4.1.6/drivers/gpu/drm/i915/intel_display.c
---- linux-4.1.6.orig/drivers/gpu/drm/i915/intel_display.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/gpu/drm/i915/intel_display.c 2015-09-08 23:49:05.822120634 +0200
+diff -Nur linux-4.1.10.orig/drivers/gpu/drm/i915/intel_display.c linux-4.1.10/drivers/gpu/drm/i915/intel_display.c
+--- linux-4.1.10.orig/drivers/gpu/drm/i915/intel_display.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/gpu/drm/i915/intel_display.c 2015-10-07 18:00:08.000000000 +0200
@@ -10086,7 +10086,7 @@
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -6099,9 +6121,9 @@ diff -Nur linux-4.1.6.orig/drivers/gpu/drm/i915/intel_display.c linux-4.1.6/driv
if (crtc == NULL)
return;
-diff -Nur linux-4.1.6.orig/drivers/i2c/busses/i2c-omap.c linux-4.1.6/drivers/i2c/busses/i2c-omap.c
---- linux-4.1.6.orig/drivers/i2c/busses/i2c-omap.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/i2c/busses/i2c-omap.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/i2c/busses/i2c-omap.c linux-4.1.10/drivers/i2c/busses/i2c-omap.c
+--- linux-4.1.10.orig/drivers/i2c/busses/i2c-omap.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/i2c/busses/i2c-omap.c 2015-10-07 18:00:08.000000000 +0200
@@ -996,15 +996,12 @@
u16 mask;
u16 stat;
@@ -6119,9 +6141,9 @@ diff -Nur linux-4.1.6.orig/drivers/i2c/busses/i2c-omap.c linux-4.1.6/drivers/i2c
return ret;
}
-diff -Nur linux-4.1.6.orig/drivers/ide/alim15x3.c linux-4.1.6/drivers/ide/alim15x3.c
---- linux-4.1.6.orig/drivers/ide/alim15x3.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/alim15x3.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/alim15x3.c linux-4.1.10/drivers/ide/alim15x3.c
+--- linux-4.1.10.orig/drivers/ide/alim15x3.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/alim15x3.c 2015-10-07 18:00:08.000000000 +0200
@@ -234,7 +234,7 @@
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
@@ -6140,9 +6162,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/alim15x3.c linux-4.1.6/drivers/ide/alim15
return 0;
}
-diff -Nur linux-4.1.6.orig/drivers/ide/hpt366.c linux-4.1.6/drivers/ide/hpt366.c
---- linux-4.1.6.orig/drivers/ide/hpt366.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/hpt366.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/hpt366.c linux-4.1.10/drivers/ide/hpt366.c
+--- linux-4.1.10.orig/drivers/ide/hpt366.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/hpt366.c 2015-10-07 18:00:08.000000000 +0200
@@ -1241,7 +1241,7 @@
dma_old = inb(base + 2);
@@ -6161,9 +6183,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/hpt366.c linux-4.1.6/drivers/ide/hpt366.c
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
-diff -Nur linux-4.1.6.orig/drivers/ide/ide-io.c linux-4.1.6/drivers/ide/ide-io.c
---- linux-4.1.6.orig/drivers/ide/ide-io.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/ide-io.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/ide-io.c linux-4.1.10/drivers/ide/ide-io.c
+--- linux-4.1.10.orig/drivers/ide/ide-io.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/ide-io.c 2015-10-07 18:00:08.000000000 +0200
@@ -659,7 +659,7 @@
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
@@ -6173,9 +6195,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/ide-io.c linux-4.1.6/drivers/ide/ide-io.c
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
-diff -Nur linux-4.1.6.orig/drivers/ide/ide-iops.c linux-4.1.6/drivers/ide/ide-iops.c
---- linux-4.1.6.orig/drivers/ide/ide-iops.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/ide-iops.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/ide-iops.c linux-4.1.10/drivers/ide/ide-iops.c
+--- linux-4.1.10.orig/drivers/ide/ide-iops.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/ide-iops.c 2015-10-07 18:00:08.000000000 +0200
@@ -129,12 +129,12 @@
if ((stat & ATA_BUSY) == 0)
break;
@@ -6191,9 +6213,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/ide-iops.c linux-4.1.6/drivers/ide/ide-io
}
/*
* Allow status to settle, then read it again.
-diff -Nur linux-4.1.6.orig/drivers/ide/ide-io-std.c linux-4.1.6/drivers/ide/ide-io-std.c
---- linux-4.1.6.orig/drivers/ide/ide-io-std.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/ide-io-std.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/ide-io-std.c linux-4.1.10/drivers/ide/ide-io-std.c
+--- linux-4.1.10.orig/drivers/ide/ide-io-std.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/ide-io-std.c 2015-10-07 18:00:08.000000000 +0200
@@ -175,7 +175,7 @@
unsigned long uninitialized_var(flags);
@@ -6230,9 +6252,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/ide-io-std.c linux-4.1.6/drivers/ide/ide-
if (((len + 1) & 3) < 2)
return;
-diff -Nur linux-4.1.6.orig/drivers/ide/ide-probe.c linux-4.1.6/drivers/ide/ide-probe.c
---- linux-4.1.6.orig/drivers/ide/ide-probe.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/ide-probe.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/ide-probe.c linux-4.1.10/drivers/ide/ide-probe.c
+--- linux-4.1.10.orig/drivers/ide/ide-probe.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/ide-probe.c 2015-10-07 18:00:08.000000000 +0200
@@ -196,10 +196,10 @@
int bswap = 1;
@@ -6246,9 +6268,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/ide-probe.c linux-4.1.6/drivers/ide/ide-p
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
-diff -Nur linux-4.1.6.orig/drivers/ide/ide-taskfile.c linux-4.1.6/drivers/ide/ide-taskfile.c
---- linux-4.1.6.orig/drivers/ide/ide-taskfile.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/ide/ide-taskfile.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/ide/ide-taskfile.c linux-4.1.10/drivers/ide/ide-taskfile.c
+--- linux-4.1.10.orig/drivers/ide/ide-taskfile.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/ide/ide-taskfile.c 2015-10-07 18:00:08.000000000 +0200
@@ -250,7 +250,7 @@
page_is_high = PageHighMem(page);
@@ -6276,9 +6298,9 @@ diff -Nur linux-4.1.6.orig/drivers/ide/ide-taskfile.c linux-4.1.6/drivers/ide/id
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-diff -Nur linux-4.1.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.1.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
---- linux-4.1.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.1.10/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+--- linux-4.1.10.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-10-07 18:00:08.000000000 +0200
@@ -821,7 +821,7 @@
ipoib_dbg_mcast(priv, "restarting multicast task\n");
@@ -6297,9 +6319,9 @@ diff -Nur linux-4.1.6.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-
/*
* make sure the in-flight joins have finished before we attempt
-diff -Nur linux-4.1.6.orig/drivers/input/gameport/gameport.c linux-4.1.6/drivers/input/gameport/gameport.c
---- linux-4.1.6.orig/drivers/input/gameport/gameport.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/input/gameport/gameport.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/input/gameport/gameport.c linux-4.1.10/drivers/input/gameport/gameport.c
+--- linux-4.1.10.orig/drivers/input/gameport/gameport.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/input/gameport/gameport.c 2015-10-07 18:00:08.000000000 +0200
@@ -124,12 +124,12 @@
tx = 1 << 30;
@@ -6329,9 +6351,9 @@ diff -Nur linux-4.1.6.orig/drivers/input/gameport/gameport.c linux-4.1.6/drivers
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
-diff -Nur linux-4.1.6.orig/drivers/leds/trigger/Kconfig linux-4.1.6/drivers/leds/trigger/Kconfig
---- linux-4.1.6.orig/drivers/leds/trigger/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/leds/trigger/Kconfig 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/leds/trigger/Kconfig linux-4.1.10/drivers/leds/trigger/Kconfig
+--- linux-4.1.10.orig/drivers/leds/trigger/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/leds/trigger/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -61,7 +61,7 @@
config LEDS_TRIGGER_CPU
@@ -6341,9 +6363,9 @@ diff -Nur linux-4.1.6.orig/drivers/leds/trigger/Kconfig linux-4.1.6/drivers/leds
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
-diff -Nur linux-4.1.6.orig/drivers/md/bcache/Kconfig linux-4.1.6/drivers/md/bcache/Kconfig
---- linux-4.1.6.orig/drivers/md/bcache/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/md/bcache/Kconfig 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/md/bcache/Kconfig linux-4.1.10/drivers/md/bcache/Kconfig
+--- linux-4.1.10.orig/drivers/md/bcache/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/md/bcache/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -1,6 +1,7 @@
config BCACHE
@@ -6352,9 +6374,9 @@ diff -Nur linux-4.1.6.orig/drivers/md/bcache/Kconfig linux-4.1.6/drivers/md/bcac
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
-diff -Nur linux-4.1.6.orig/drivers/md/dm.c linux-4.1.6/drivers/md/dm.c
---- linux-4.1.6.orig/drivers/md/dm.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/md/dm.c 2015-09-08 23:49:06.074092692 +0200
+diff -Nur linux-4.1.10.orig/drivers/md/dm.c linux-4.1.10/drivers/md/dm.c
+--- linux-4.1.10.orig/drivers/md/dm.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/md/dm.c 2015-10-07 18:00:08.000000000 +0200
@@ -2132,7 +2132,7 @@
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
@@ -6364,9 +6386,9 @@ diff -Nur linux-4.1.6.orig/drivers/md/dm.c linux-4.1.6/drivers/md/dm.c
}
goto out;
-diff -Nur linux-4.1.6.orig/drivers/md/raid5.c linux-4.1.6/drivers/md/raid5.c
---- linux-4.1.6.orig/drivers/md/raid5.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/md/raid5.c 2015-09-08 23:49:06.078092247 +0200
+diff -Nur linux-4.1.10.orig/drivers/md/raid5.c linux-4.1.10/drivers/md/raid5.c
+--- linux-4.1.10.orig/drivers/md/raid5.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/md/raid5.c 2015-10-07 18:00:08.000000000 +0200
@@ -1918,8 +1918,9 @@
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -6388,7 +6410,7 @@ diff -Nur linux-4.1.6.orig/drivers/md/raid5.c linux-4.1.6/drivers/md/raid5.c
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6350,6 +6352,7 @@
+@@ -6361,6 +6363,7 @@
__func__, cpu);
break;
}
@@ -6396,10 +6418,7867 @@ diff -Nur linux-4.1.6.orig/drivers/md/raid5.c linux-4.1.6/drivers/md/raid5.c
}
put_online_cpus();
-diff -Nur linux-4.1.6.orig/drivers/md/raid5.h linux-4.1.6/drivers/md/raid5.h
---- linux-4.1.6.orig/drivers/md/raid5.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/md/raid5.h 2015-09-08 23:49:06.078092247 +0200
-@@ -494,6 +494,7 @@
+diff -Nur linux-4.1.10.orig/drivers/md/raid5.c.orig linux-4.1.10/drivers/md/raid5.c.orig
+--- linux-4.1.10.orig/drivers/md/raid5.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/drivers/md/raid5.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,7853 @@
++/*
++ * raid5.c : Multiple Devices driver for Linux
++ * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
++ * Copyright (C) 1999, 2000 Ingo Molnar
++ * Copyright (C) 2002, 2003 H. Peter Anvin
++ *
++ * RAID-4/5/6 management functions.
++ * Thanks to Penguin Computing for making the RAID-6 development possible
++ * by donating a test server!
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * You should have received a copy of the GNU General Public License
++ * (for example /usr/src/linux/COPYING); if not, write to the Free
++ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/*
++ * BITMAP UNPLUGGING:
++ *
++ * The sequencing for updating the bitmap reliably is a little
++ * subtle (and I got it wrong the first time) so it deserves some
++ * explanation.
++ *
++ * We group bitmap updates into batches. Each batch has a number.
++ * We may write out several batches at once, but that isn't very important.
++ * conf->seq_write is the number of the last batch successfully written.
++ * conf->seq_flush is the number of the last batch that was closed to
++ * new additions.
++ * When we discover that we will need to write to any block in a stripe
++ * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
++ * the number of the batch it will be in. This is seq_flush+1.
++ * When we are ready to do a write, if that batch hasn't been written yet,
++ * we plug the array and queue the stripe for later.
++ * When an unplug happens, we increment bm_flush, thus closing the current
++ * batch.
++ * When we notice that bm_flush > bm_write, we write out all pending updates
++ * to the bitmap, and advance bm_write to where bm_flush was.
++ * This may occasionally write a bit out twice, but is sure never to
++ * miss any bits.
++ */
++
++#include <linux/blkdev.h>
++#include <linux/kthread.h>
++#include <linux/raid/pq.h>
++#include <linux/async_tx.h>
++#include <linux/module.h>
++#include <linux/async.h>
++#include <linux/seq_file.h>
++#include <linux/cpu.h>
++#include <linux/slab.h>
++#include <linux/ratelimit.h>
++#include <linux/nodemask.h>
++#include <linux/flex_array.h>
++#include <trace/events/block.h>
++
++#include "md.h"
++#include "raid5.h"
++#include "raid0.h"
++#include "bitmap.h"
++
++#define cpu_to_group(cpu) cpu_to_node(cpu)
++#define ANY_GROUP NUMA_NO_NODE
++
++static bool devices_handle_discard_safely = false;
++module_param(devices_handle_discard_safely, bool, 0644);
++MODULE_PARM_DESC(devices_handle_discard_safely,
++ "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
++static struct workqueue_struct *raid5_wq;
++/*
++ * Stripe cache
++ */
++
++#define NR_STRIPES 256
++#define STRIPE_SIZE PAGE_SIZE
++#define STRIPE_SHIFT (PAGE_SHIFT - 9)
++#define STRIPE_SECTORS (STRIPE_SIZE>>9)
++#define IO_THRESHOLD 1
++#define BYPASS_THRESHOLD 1
++#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
++#define HASH_MASK (NR_HASH - 1)
++#define MAX_STRIPE_BATCH 8
++
++static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
++{
++ int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
++ return &conf->stripe_hashtbl[hash];
++}
++
++static inline int stripe_hash_locks_hash(sector_t sect)
++{
++ return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
++}
++
++static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
++{
++ spin_lock_irq(conf->hash_locks + hash);
++ spin_lock(&conf->device_lock);
++}
++
++static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
++{
++ spin_unlock(&conf->device_lock);
++ spin_unlock_irq(conf->hash_locks + hash);
++}
++
++static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
++{
++ int i;
++ local_irq_disable();
++ spin_lock(conf->hash_locks);
++ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
++ spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
++ spin_lock(&conf->device_lock);
++}
++
++static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
++{
++ int i;
++ spin_unlock(&conf->device_lock);
++ for (i = NR_STRIPE_HASH_LOCKS; i; i--)
++ spin_unlock(conf->hash_locks + i - 1);
++ local_irq_enable();
++}
++
++/* bio's attached to a stripe+device for I/O are linked together in bi_sector
++ * order without overlap. There may be several bio's per stripe+device, and
++ * a bio could span several devices.
++ * When walking this list for a particular stripe+device, we must never proceed
++ * beyond a bio that extends past this device, as the next bio might no longer
++ * be valid.
++ * This function is used to determine the 'next' bio in the list, given the sector
++ * of the current stripe+device
++ */
++static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
++{
++ int sectors = bio_sectors(bio);
++ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
++ return bio->bi_next;
++ else
++ return NULL;
++}
++
++/*
++ * We maintain a biased count of active stripes in the bottom 16 bits of
++ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
++ */
++static inline int raid5_bi_processed_stripes(struct bio *bio)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ return (atomic_read(segments) >> 16) & 0xffff;
++}
++
++static inline int raid5_dec_bi_active_stripes(struct bio *bio)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ return atomic_sub_return(1, segments) & 0xffff;
++}
++
++static inline void raid5_inc_bi_active_stripes(struct bio *bio)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ atomic_inc(segments);
++}
++
++static inline void raid5_set_bi_processed_stripes(struct bio *bio,
++ unsigned int cnt)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ int old, new;
++
++ do {
++ old = atomic_read(segments);
++ new = (old & 0xffff) | (cnt << 16);
++ } while (atomic_cmpxchg(segments, old, new) != old);
++}
++
++static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ atomic_set(segments, cnt);
++}
++
++/* Find first data disk in a raid6 stripe */
++static inline int raid6_d0(struct stripe_head *sh)
++{
++ if (sh->ddf_layout)
++		/* ddf always starts from the first device */
++ return 0;
++ /* md starts just after Q block */
++ if (sh->qd_idx == sh->disks - 1)
++ return 0;
++ else
++ return sh->qd_idx + 1;
++}
++static inline int raid6_next_disk(int disk, int raid_disks)
++{
++ disk++;
++ return (disk < raid_disks) ? disk : 0;
++}
++
++/* When walking through the disks in a raid5, starting at raid6_d0,
++ * We need to map each disk to a 'slot', where the data disks are slot
++ * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
++ * is raid_disks-1. This helper does that mapping.
++ */
++static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
++ int *count, int syndrome_disks)
++{
++ int slot = *count;
++
++ if (sh->ddf_layout)
++ (*count)++;
++ if (idx == sh->pd_idx)
++ return syndrome_disks;
++ if (idx == sh->qd_idx)
++ return syndrome_disks + 1;
++ if (!sh->ddf_layout)
++ (*count)++;
++ return slot;
++}
++
++static void return_io(struct bio *return_bi)
++{
++ struct bio *bi = return_bi;
++ while (bi) {
++
++ return_bi = bi->bi_next;
++ bi->bi_next = NULL;
++ bi->bi_iter.bi_size = 0;
++ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
++ bi, 0);
++ bio_endio(bi, 0);
++ bi = return_bi;
++ }
++}
++
++static void print_raid5_conf (struct r5conf *conf);
++
++static int stripe_operations_active(struct stripe_head *sh)
++{
++ return sh->check_state || sh->reconstruct_state ||
++ test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
++ test_bit(STRIPE_COMPUTE_RUN, &sh->state);
++}
++
++static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
++{
++ struct r5conf *conf = sh->raid_conf;
++ struct r5worker_group *group;
++ int thread_cnt;
++ int i, cpu = sh->cpu;
++
++ if (!cpu_online(cpu)) {
++ cpu = cpumask_any(cpu_online_mask);
++ sh->cpu = cpu;
++ }
++
++ if (list_empty(&sh->lru)) {
++ struct r5worker_group *group;
++ group = conf->worker_groups + cpu_to_group(cpu);
++ list_add_tail(&sh->lru, &group->handle_list);
++ group->stripes_cnt++;
++ sh->group = group;
++ }
++
++ if (conf->worker_cnt_per_group == 0) {
++ md_wakeup_thread(conf->mddev->thread);
++ return;
++ }
++
++ group = conf->worker_groups + cpu_to_group(sh->cpu);
++
++ group->workers[0].working = true;
++ /* at least one worker should run to avoid race */
++ queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
++
++ thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
++ /* wakeup more workers */
++ for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
++ if (group->workers[i].working == false) {
++ group->workers[i].working = true;
++ queue_work_on(sh->cpu, raid5_wq,
++ &group->workers[i].work);
++ thread_cnt--;
++ }
++ }
++}
++
++static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
++ struct list_head *temp_inactive_list)
++{
++ BUG_ON(!list_empty(&sh->lru));
++ BUG_ON(atomic_read(&conf->active_stripes)==0);
++ if (test_bit(STRIPE_HANDLE, &sh->state)) {
++ if (test_bit(STRIPE_DELAYED, &sh->state) &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ list_add_tail(&sh->lru, &conf->delayed_list);
++ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
++ sh->bm_seq - conf->seq_write > 0)
++ list_add_tail(&sh->lru, &conf->bitmap_list);
++ else {
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ clear_bit(STRIPE_BIT_DELAY, &sh->state);
++ if (conf->worker_cnt_per_group == 0) {
++ list_add_tail(&sh->lru, &conf->handle_list);
++ } else {
++ raid5_wakeup_stripe_thread(sh);
++ return;
++ }
++ }
++ md_wakeup_thread(conf->mddev->thread);
++ } else {
++ BUG_ON(stripe_operations_active(sh));
++ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ if (atomic_dec_return(&conf->preread_active_stripes)
++ < IO_THRESHOLD)
++ md_wakeup_thread(conf->mddev->thread);
++ atomic_dec(&conf->active_stripes);
++ if (!test_bit(STRIPE_EXPANDING, &sh->state))
++ list_add_tail(&sh->lru, temp_inactive_list);
++ }
++}
++
++static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
++ struct list_head *temp_inactive_list)
++{
++ if (atomic_dec_and_test(&sh->count))
++ do_release_stripe(conf, sh, temp_inactive_list);
++}
++
++/*
++ * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
++ *
++ * Be careful: Only one task can add/delete stripes from temp_inactive_list at
++ * a given time. Adding stripes only takes the device lock, while deleting
++ * stripes only takes the hash lock.
++ */
++static void release_inactive_stripe_list(struct r5conf *conf,
++ struct list_head *temp_inactive_list,
++ int hash)
++{
++ int size;
++ bool do_wakeup = false;
++ unsigned long flags;
++
++ if (hash == NR_STRIPE_HASH_LOCKS) {
++ size = NR_STRIPE_HASH_LOCKS;
++ hash = NR_STRIPE_HASH_LOCKS - 1;
++ } else
++ size = 1;
++ while (size) {
++ struct list_head *list = &temp_inactive_list[size - 1];
++
++ /*
++ * We don't hold any lock here yet, get_active_stripe() might
++ * remove stripes from the list
++ */
++ if (!list_empty_careful(list)) {
++ spin_lock_irqsave(conf->hash_locks + hash, flags);
++ if (list_empty(conf->inactive_list + hash) &&
++ !list_empty(list))
++ atomic_dec(&conf->empty_inactive_list_nr);
++ list_splice_tail_init(list, conf->inactive_list + hash);
++ do_wakeup = true;
++ spin_unlock_irqrestore(conf->hash_locks + hash, flags);
++ }
++ size--;
++ hash--;
++ }
++
++ if (do_wakeup) {
++ wake_up(&conf->wait_for_stripe);
++ if (conf->retry_read_aligned)
++ md_wakeup_thread(conf->mddev->thread);
++ }
++}
++
++/* should hold conf->device_lock already */
++static int release_stripe_list(struct r5conf *conf,
++ struct list_head *temp_inactive_list)
++{
++ struct stripe_head *sh;
++ int count = 0;
++ struct llist_node *head;
++
++ head = llist_del_all(&conf->released_stripes);
++ head = llist_reverse_order(head);
++ while (head) {
++ int hash;
++
++ sh = llist_entry(head, struct stripe_head, release_list);
++ head = llist_next(head);
++		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
++ smp_mb();
++ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
++ /*
++		 * Don't worry that the bit is set here, because if the bit is set
++ * again, the count is always > 1. This is true for
++ * STRIPE_ON_UNPLUG_LIST bit too.
++ */
++ hash = sh->hash_lock_index;
++ __release_stripe(conf, sh, &temp_inactive_list[hash]);
++ count++;
++ }
++
++ return count;
++}
++
++static void release_stripe(struct stripe_head *sh)
++{
++ struct r5conf *conf = sh->raid_conf;
++ unsigned long flags;
++ struct list_head list;
++ int hash;
++ bool wakeup;
++
++ /* Avoid release_list until the last reference.
++ */
++ if (atomic_add_unless(&sh->count, -1, 1))
++ return;
++
++ if (unlikely(!conf->mddev->thread) ||
++ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
++ goto slow_path;
++ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
++ if (wakeup)
++ md_wakeup_thread(conf->mddev->thread);
++ return;
++slow_path:
++ local_irq_save(flags);
++ /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
++ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
++ INIT_LIST_HEAD(&list);
++ hash = sh->hash_lock_index;
++ do_release_stripe(conf, sh, &list);
++ spin_unlock(&conf->device_lock);
++ release_inactive_stripe_list(conf, &list, hash);
++ }
++ local_irq_restore(flags);
++}
++
++static inline void remove_hash(struct stripe_head *sh)
++{
++ pr_debug("remove_hash(), stripe %llu\n",
++ (unsigned long long)sh->sector);
++
++ hlist_del_init(&sh->hash);
++}
++
++static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
++{
++ struct hlist_head *hp = stripe_hash(conf, sh->sector);
++
++ pr_debug("insert_hash(), stripe %llu\n",
++ (unsigned long long)sh->sector);
++
++ hlist_add_head(&sh->hash, hp);
++}
++
++/* find an idle stripe, make sure it is unhashed, and return it. */
++static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
++{
++ struct stripe_head *sh = NULL;
++ struct list_head *first;
++
++ if (list_empty(conf->inactive_list + hash))
++ goto out;
++ first = (conf->inactive_list + hash)->next;
++ sh = list_entry(first, struct stripe_head, lru);
++ list_del_init(first);
++ remove_hash(sh);
++ atomic_inc(&conf->active_stripes);
++ BUG_ON(hash != sh->hash_lock_index);
++ if (list_empty(conf->inactive_list + hash))
++ atomic_inc(&conf->empty_inactive_list_nr);
++out:
++ return sh;
++}
++
++static void shrink_buffers(struct stripe_head *sh)
++{
++ struct page *p;
++ int i;
++ int num = sh->raid_conf->pool_size;
++
++ for (i = 0; i < num ; i++) {
++ WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
++ p = sh->dev[i].page;
++ if (!p)
++ continue;
++ sh->dev[i].page = NULL;
++ put_page(p);
++ }
++}
++
++static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
++{
++ int i;
++ int num = sh->raid_conf->pool_size;
++
++ for (i = 0; i < num; i++) {
++ struct page *page;
++
++ if (!(page = alloc_page(gfp))) {
++ return 1;
++ }
++ sh->dev[i].page = page;
++ sh->dev[i].orig_page = page;
++ }
++ return 0;
++}
++
++static void raid5_build_block(struct stripe_head *sh, int i, int previous);
++static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
++ struct stripe_head *sh);
++
++static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int i, seq;
++
++ BUG_ON(atomic_read(&sh->count) != 0);
++ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
++ BUG_ON(stripe_operations_active(sh));
++ BUG_ON(sh->batch_head);
++
++ pr_debug("init_stripe called, stripe %llu\n",
++ (unsigned long long)sector);
++retry:
++ seq = read_seqcount_begin(&conf->gen_lock);
++ sh->generation = conf->generation - previous;
++ sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
++ sh->sector = sector;
++ stripe_set_idx(sector, conf, previous, sh);
++ sh->state = 0;
++
++ for (i = sh->disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ if (dev->toread || dev->read || dev->towrite || dev->written ||
++ test_bit(R5_LOCKED, &dev->flags)) {
++ printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
++ (unsigned long long)sh->sector, i, dev->toread,
++ dev->read, dev->towrite, dev->written,
++ test_bit(R5_LOCKED, &dev->flags));
++ WARN_ON(1);
++ }
++ dev->flags = 0;
++ raid5_build_block(sh, i, previous);
++ }
++ if (read_seqcount_retry(&conf->gen_lock, seq))
++ goto retry;
++ sh->overwrite_disks = 0;
++ insert_hash(conf, sh);
++ sh->cpu = smp_processor_id();
++ set_bit(STRIPE_BATCH_READY, &sh->state);
++}
++
++static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
++ short generation)
++{
++ struct stripe_head *sh;
++
++ pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
++ hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
++ if (sh->sector == sector && sh->generation == generation)
++ return sh;
++ pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
++ return NULL;
++}
++
++/*
++ * Need to check if array has failed when deciding whether to:
++ * - start an array
++ * - remove non-faulty devices
++ * - add a spare
++ * - allow a reshape
++ * This determination is simple when no reshape is happening.
++ * However if there is a reshape, we need to carefully check
++ * both the before and after sections.
++ * This is because some failed devices may only affect one
++ * of the two sections, and some non-in_sync devices may
++ * be insync in the section most affected by failed devices.
++ */
++static int calc_degraded(struct r5conf *conf)
++{
++ int degraded, degraded2;
++ int i;
++
++ rcu_read_lock();
++ degraded = 0;
++ for (i = 0; i < conf->previous_raid_disks; i++) {
++ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = rcu_dereference(conf->disks[i].replacement);
++ if (!rdev || test_bit(Faulty, &rdev->flags))
++ degraded++;
++ else if (test_bit(In_sync, &rdev->flags))
++ ;
++ else
++ /* not in-sync or faulty.
++ * If the reshape increases the number of devices,
++ * this is being recovered by the reshape, so
++ * this 'previous' section is not in_sync.
++ * If the number of devices is being reduced however,
++ * the device can only be part of the array if
++ * we are reverting a reshape, so this section will
++ * be in-sync.
++ */
++ if (conf->raid_disks >= conf->previous_raid_disks)
++ degraded++;
++ }
++ rcu_read_unlock();
++ if (conf->raid_disks == conf->previous_raid_disks)
++ return degraded;
++ rcu_read_lock();
++ degraded2 = 0;
++ for (i = 0; i < conf->raid_disks; i++) {
++ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = rcu_dereference(conf->disks[i].replacement);
++ if (!rdev || test_bit(Faulty, &rdev->flags))
++ degraded2++;
++ else if (test_bit(In_sync, &rdev->flags))
++ ;
++ else
++ /* not in-sync or faulty.
++ * If reshape increases the number of devices, this
++ * section has already been recovered, else it
++ * almost certainly hasn't.
++ */
++ if (conf->raid_disks <= conf->previous_raid_disks)
++ degraded2++;
++ }
++ rcu_read_unlock();
++ if (degraded2 > degraded)
++ return degraded2;
++ return degraded;
++}
++
++static int has_failed(struct r5conf *conf)
++{
++ int degraded;
++
++ if (conf->mddev->reshape_position == MaxSector)
++ return conf->mddev->degraded > conf->max_degraded;
++
++ degraded = calc_degraded(conf);
++ if (degraded > conf->max_degraded)
++ return 1;
++ return 0;
++}
++
++static struct stripe_head *
++get_active_stripe(struct r5conf *conf, sector_t sector,
++ int previous, int noblock, int noquiesce)
++{
++ struct stripe_head *sh;
++ int hash = stripe_hash_locks_hash(sector);
++
++ pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
++
++ spin_lock_irq(conf->hash_locks + hash);
++
++ do {
++ wait_event_lock_irq(conf->wait_for_stripe,
++ conf->quiesce == 0 || noquiesce,
++ *(conf->hash_locks + hash));
++ sh = __find_stripe(conf, sector, conf->generation - previous);
++ if (!sh) {
++ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
++ sh = get_free_stripe(conf, hash);
++ if (!sh && llist_empty(&conf->released_stripes) &&
++ !test_bit(R5_DID_ALLOC, &conf->cache_state))
++ set_bit(R5_ALLOC_MORE,
++ &conf->cache_state);
++ }
++ if (noblock && sh == NULL)
++ break;
++ if (!sh) {
++ set_bit(R5_INACTIVE_BLOCKED,
++ &conf->cache_state);
++ wait_event_lock_irq(
++ conf->wait_for_stripe,
++ !list_empty(conf->inactive_list + hash) &&
++ (atomic_read(&conf->active_stripes)
++ < (conf->max_nr_stripes * 3 / 4)
++ || !test_bit(R5_INACTIVE_BLOCKED,
++ &conf->cache_state)),
++ *(conf->hash_locks + hash));
++ clear_bit(R5_INACTIVE_BLOCKED,
++ &conf->cache_state);
++ } else {
++ init_stripe(sh, sector, previous);
++ atomic_inc(&sh->count);
++ }
++ } else if (!atomic_inc_not_zero(&sh->count)) {
++ spin_lock(&conf->device_lock);
++ if (!atomic_read(&sh->count)) {
++ if (!test_bit(STRIPE_HANDLE, &sh->state))
++ atomic_inc(&conf->active_stripes);
++ BUG_ON(list_empty(&sh->lru) &&
++ !test_bit(STRIPE_EXPANDING, &sh->state));
++ list_del_init(&sh->lru);
++ if (sh->group) {
++ sh->group->stripes_cnt--;
++ sh->group = NULL;
++ }
++ }
++ atomic_inc(&sh->count);
++ spin_unlock(&conf->device_lock);
++ }
++ } while (sh == NULL);
++
++ spin_unlock_irq(conf->hash_locks + hash);
++ return sh;
++}
++
++static bool is_full_stripe_write(struct stripe_head *sh)
++{
++ BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
++ return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
++}
++
++static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
++{
++ local_irq_disable();
++ if (sh1 > sh2) {
++ spin_lock(&sh2->stripe_lock);
++ spin_lock_nested(&sh1->stripe_lock, 1);
++ } else {
++ spin_lock(&sh1->stripe_lock);
++ spin_lock_nested(&sh2->stripe_lock, 1);
++ }
++}
++
++static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
++{
++ spin_unlock(&sh1->stripe_lock);
++ spin_unlock(&sh2->stripe_lock);
++ local_irq_enable();
++}
++
++/* Only freshly new full stripe normal write stripe can be added to a batch list */
++static bool stripe_can_batch(struct stripe_head *sh)
++{
++ return test_bit(STRIPE_BATCH_READY, &sh->state) &&
++ !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
++ is_full_stripe_write(sh);
++}
++
++/* we only do back search */
++static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
++{
++ struct stripe_head *head;
++ sector_t head_sector, tmp_sec;
++ int hash;
++ int dd_idx;
++
++ if (!stripe_can_batch(sh))
++ return;
++ /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
++ tmp_sec = sh->sector;
++ if (!sector_div(tmp_sec, conf->chunk_sectors))
++ return;
++ head_sector = sh->sector - STRIPE_SECTORS;
++
++ hash = stripe_hash_locks_hash(head_sector);
++ spin_lock_irq(conf->hash_locks + hash);
++ head = __find_stripe(conf, head_sector, conf->generation);
++ if (head && !atomic_inc_not_zero(&head->count)) {
++ spin_lock(&conf->device_lock);
++ if (!atomic_read(&head->count)) {
++ if (!test_bit(STRIPE_HANDLE, &head->state))
++ atomic_inc(&conf->active_stripes);
++ BUG_ON(list_empty(&head->lru) &&
++ !test_bit(STRIPE_EXPANDING, &head->state));
++ list_del_init(&head->lru);
++ if (head->group) {
++ head->group->stripes_cnt--;
++ head->group = NULL;
++ }
++ }
++ atomic_inc(&head->count);
++ spin_unlock(&conf->device_lock);
++ }
++ spin_unlock_irq(conf->hash_locks + hash);
++
++ if (!head)
++ return;
++ if (!stripe_can_batch(head))
++ goto out;
++
++ lock_two_stripes(head, sh);
++ /* clear_batch_ready clear the flag */
++ if (!stripe_can_batch(head) || !stripe_can_batch(sh))
++ goto unlock_out;
++
++ if (sh->batch_head)
++ goto unlock_out;
++
++ dd_idx = 0;
++ while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
++ dd_idx++;
++ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
++ goto unlock_out;
++
++ if (head->batch_head) {
++ spin_lock(&head->batch_head->batch_lock);
++ /* This batch list is already running */
++ if (!stripe_can_batch(head)) {
++ spin_unlock(&head->batch_head->batch_lock);
++ goto unlock_out;
++ }
++
++ /*
++ * at this point, head's BATCH_READY could be cleared, but we
++ * can still add the stripe to batch list
++ */
++ list_add(&sh->batch_list, &head->batch_list);
++ spin_unlock(&head->batch_head->batch_lock);
++
++ sh->batch_head = head->batch_head;
++ } else {
++ head->batch_head = head;
++ sh->batch_head = head->batch_head;
++ spin_lock(&head->batch_lock);
++ list_add_tail(&sh->batch_list, &head->batch_list);
++ spin_unlock(&head->batch_lock);
++ }
++
++ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ if (atomic_dec_return(&conf->preread_active_stripes)
++ < IO_THRESHOLD)
++ md_wakeup_thread(conf->mddev->thread);
++
++ if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
++ int seq = sh->bm_seq;
++ if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
++ sh->batch_head->bm_seq > seq)
++ seq = sh->batch_head->bm_seq;
++ set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
++ sh->batch_head->bm_seq = seq;
++ }
++
++ atomic_inc(&sh->count);
++unlock_out:
++ unlock_two_stripes(head, sh);
++out:
++ release_stripe(head);
++}
++
++/* Determine if 'data_offset' or 'new_data_offset' should be used
++ * in this stripe_head.
++ */
++static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
++{
++ sector_t progress = conf->reshape_progress;
++ /* Need a memory barrier to make sure we see the value
++ * of conf->generation, or ->data_offset that was set before
++ * reshape_progress was updated.
++ */
++ smp_rmb();
++ if (progress == MaxSector)
++ return 0;
++ if (sh->generation == conf->generation - 1)
++ return 0;
++ /* We are in a reshape, and this is a new-generation stripe,
++ * so use new_data_offset.
++ */
++ return 1;
++}
++
++static void
++raid5_end_read_request(struct bio *bi, int error);
++static void
++raid5_end_write_request(struct bio *bi, int error);
++
++static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int i, disks = sh->disks;
++ struct stripe_head *head_sh = sh;
++
++ might_sleep();
++
++ for (i = disks; i--; ) {
++ int rw;
++ int replace_only = 0;
++ struct bio *bi, *rbi;
++ struct md_rdev *rdev, *rrdev = NULL;
++
++ sh = head_sh;
++ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
++ if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
++ rw = WRITE_FUA;
++ else
++ rw = WRITE;
++ if (test_bit(R5_Discard, &sh->dev[i].flags))
++ rw |= REQ_DISCARD;
++ } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
++ rw = READ;
++ else if (test_and_clear_bit(R5_WantReplace,
++ &sh->dev[i].flags)) {
++ rw = WRITE;
++ replace_only = 1;
++ } else
++ continue;
++ if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
++ rw |= REQ_SYNC;
++
++again:
++ bi = &sh->dev[i].req;
++ rbi = &sh->dev[i].rreq; /* For writing to replacement */
++
++ rcu_read_lock();
++ rrdev = rcu_dereference(conf->disks[i].replacement);
++ smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
++ rdev = rcu_dereference(conf->disks[i].rdev);
++ if (!rdev) {
++ rdev = rrdev;
++ rrdev = NULL;
++ }
++ if (rw & WRITE) {
++ if (replace_only)
++ rdev = NULL;
++ if (rdev == rrdev)
++ /* We raced and saw duplicates */
++ rrdev = NULL;
++ } else {
++ if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
++ rdev = rrdev;
++ rrdev = NULL;
++ }
++
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = NULL;
++ if (rdev)
++ atomic_inc(&rdev->nr_pending);
++ if (rrdev && test_bit(Faulty, &rrdev->flags))
++ rrdev = NULL;
++ if (rrdev)
++ atomic_inc(&rrdev->nr_pending);
++ rcu_read_unlock();
++
++ /* We have already checked bad blocks for reads. Now
++ * need to check for writes. We never accept write errors
++		 * on the replacement, so we don't need to check rrdev.
++ */
++ while ((rw & WRITE) && rdev &&
++ test_bit(WriteErrorSeen, &rdev->flags)) {
++ sector_t first_bad;
++ int bad_sectors;
++ int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
++ &first_bad, &bad_sectors);
++ if (!bad)
++ break;
++
++ if (bad < 0) {
++ set_bit(BlockedBadBlocks, &rdev->flags);
++ if (!conf->mddev->external &&
++ conf->mddev->flags) {
++ /* It is very unlikely, but we might
++ * still need to write out the
++ * bad block log - better give it
++					 * a chance. */
++ md_check_recovery(conf->mddev);
++ }
++ /*
++ * Because md_wait_for_blocked_rdev
++ * will dec nr_pending, we must
++ * increment it first.
++ */
++ atomic_inc(&rdev->nr_pending);
++ md_wait_for_blocked_rdev(rdev, conf->mddev);
++ } else {
++ /* Acknowledged bad block - skip the write */
++ rdev_dec_pending(rdev, conf->mddev);
++ rdev = NULL;
++ }
++ }
++
++ if (rdev) {
++ if (s->syncing || s->expanding || s->expanded
++ || s->replacing)
++ md_sync_acct(rdev->bdev, STRIPE_SECTORS);
++
++ set_bit(STRIPE_IO_STARTED, &sh->state);
++
++ bio_reset(bi);
++ bi->bi_bdev = rdev->bdev;
++ bi->bi_rw = rw;
++ bi->bi_end_io = (rw & WRITE)
++ ? raid5_end_write_request
++ : raid5_end_read_request;
++ bi->bi_private = sh;
++
++ pr_debug("%s: for %llu schedule op %ld on disc %d\n",
++ __func__, (unsigned long long)sh->sector,
++ bi->bi_rw, i);
++ atomic_inc(&sh->count);
++ if (sh != head_sh)
++ atomic_inc(&head_sh->count);
++ if (use_new_offset(conf, sh))
++ bi->bi_iter.bi_sector = (sh->sector
++ + rdev->new_data_offset);
++ else
++ bi->bi_iter.bi_sector = (sh->sector
++ + rdev->data_offset);
++ if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
++ bi->bi_rw |= REQ_NOMERGE;
++
++ if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
++ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ sh->dev[i].vec.bv_page = sh->dev[i].page;
++ bi->bi_vcnt = 1;
++ bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
++ bi->bi_io_vec[0].bv_offset = 0;
++ bi->bi_iter.bi_size = STRIPE_SIZE;
++ /*
++ * If this is discard request, set bi_vcnt 0. We don't
++ * want to confuse SCSI because SCSI will replace payload
++ */
++ if (rw & REQ_DISCARD)
++ bi->bi_vcnt = 0;
++ if (rrdev)
++ set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
++
++ if (conf->mddev->gendisk)
++ trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
++ bi, disk_devt(conf->mddev->gendisk),
++ sh->dev[i].sector);
++ generic_make_request(bi);
++ }
++ if (rrdev) {
++ if (s->syncing || s->expanding || s->expanded
++ || s->replacing)
++ md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
++
++ set_bit(STRIPE_IO_STARTED, &sh->state);
++
++ bio_reset(rbi);
++ rbi->bi_bdev = rrdev->bdev;
++ rbi->bi_rw = rw;
++ BUG_ON(!(rw & WRITE));
++ rbi->bi_end_io = raid5_end_write_request;
++ rbi->bi_private = sh;
++
++ pr_debug("%s: for %llu schedule op %ld on "
++ "replacement disc %d\n",
++ __func__, (unsigned long long)sh->sector,
++ rbi->bi_rw, i);
++ atomic_inc(&sh->count);
++ if (sh != head_sh)
++ atomic_inc(&head_sh->count);
++ if (use_new_offset(conf, sh))
++ rbi->bi_iter.bi_sector = (sh->sector
++ + rrdev->new_data_offset);
++ else
++ rbi->bi_iter.bi_sector = (sh->sector
++ + rrdev->data_offset);
++ if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
++ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ sh->dev[i].rvec.bv_page = sh->dev[i].page;
++ rbi->bi_vcnt = 1;
++ rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
++ rbi->bi_io_vec[0].bv_offset = 0;
++ rbi->bi_iter.bi_size = STRIPE_SIZE;
++ /*
++ * If this is discard request, set bi_vcnt 0. We don't
++ * want to confuse SCSI because SCSI will replace payload
++ */
++ if (rw & REQ_DISCARD)
++ rbi->bi_vcnt = 0;
++ if (conf->mddev->gendisk)
++ trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
++ rbi, disk_devt(conf->mddev->gendisk),
++ sh->dev[i].sector);
++ generic_make_request(rbi);
++ }
++ if (!rdev && !rrdev) {
++ if (rw & WRITE)
++ set_bit(STRIPE_DEGRADED, &sh->state);
++ pr_debug("skip op %ld on disc %d for sector %llu\n",
++ bi->bi_rw, i, (unsigned long long)sh->sector);
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++
++ if (!head_sh->batch_head)
++ continue;
++ sh = list_first_entry(&sh->batch_list, struct stripe_head,
++ batch_list);
++ if (sh != head_sh)
++ goto again;
++ }
++}
++
++static struct dma_async_tx_descriptor *
++async_copy_data(int frombio, struct bio *bio, struct page **page,
++ sector_t sector, struct dma_async_tx_descriptor *tx,
++ struct stripe_head *sh)
++{
++ struct bio_vec bvl;
++ struct bvec_iter iter;
++ struct page *bio_page;
++ int page_offset;
++ struct async_submit_ctl submit;
++ enum async_tx_flags flags = 0;
++
++ if (bio->bi_iter.bi_sector >= sector)
++ page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
++ else
++ page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
++
++ if (frombio)
++ flags |= ASYNC_TX_FENCE;
++ init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
++
++ bio_for_each_segment(bvl, bio, iter) {
++ int len = bvl.bv_len;
++ int clen;
++ int b_offset = 0;
++
++ if (page_offset < 0) {
++ b_offset = -page_offset;
++ page_offset += b_offset;
++ len -= b_offset;
++ }
++
++ if (len > 0 && page_offset + len > STRIPE_SIZE)
++ clen = STRIPE_SIZE - page_offset;
++ else
++ clen = len;
++
++ if (clen > 0) {
++ b_offset += bvl.bv_offset;
++ bio_page = bvl.bv_page;
++ if (frombio) {
++ if (sh->raid_conf->skip_copy &&
++ b_offset == 0 && page_offset == 0 &&
++ clen == STRIPE_SIZE)
++ *page = bio_page;
++ else
++ tx = async_memcpy(*page, bio_page, page_offset,
++ b_offset, clen, &submit);
++ } else
++ tx = async_memcpy(bio_page, *page, b_offset,
++ page_offset, clen, &submit);
++ }
++ /* chain the operations */
++ submit.depend_tx = tx;
++
++ if (clen < len) /* hit end of page */
++ break;
++ page_offset += len;
++ }
++
++ return tx;
++}
++
++static void ops_complete_biofill(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++ struct bio *return_bi = NULL;
++ int i;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ /* clear completed biofills */
++ for (i = sh->disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ /* acknowledge completion of a biofill operation */
++ /* and check if we need to reply to a read request,
++ * new R5_Wantfill requests are held off until
++ * !STRIPE_BIOFILL_RUN
++ */
++ if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
++ struct bio *rbi, *rbi2;
++
++ BUG_ON(!dev->read);
++ rbi = dev->read;
++ dev->read = NULL;
++ while (rbi && rbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ rbi2 = r5_next_bio(rbi, dev->sector);
++ if (!raid5_dec_bi_active_stripes(rbi)) {
++ rbi->bi_next = return_bi;
++ return_bi = rbi;
++ }
++ rbi = rbi2;
++ }
++ }
++ }
++ clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
++
++ return_io(return_bi);
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static void ops_run_biofill(struct stripe_head *sh)
++{
++ struct dma_async_tx_descriptor *tx = NULL;
++ struct async_submit_ctl submit;
++ int i;
++
++ BUG_ON(sh->batch_head);
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = sh->disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (test_bit(R5_Wantfill, &dev->flags)) {
++ struct bio *rbi;
++ spin_lock_irq(&sh->stripe_lock);
++ dev->read = rbi = dev->toread;
++ dev->toread = NULL;
++ spin_unlock_irq(&sh->stripe_lock);
++ while (rbi && rbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ tx = async_copy_data(0, rbi, &dev->page,
++ dev->sector, tx, sh);
++ rbi = r5_next_bio(rbi, dev->sector);
++ }
++ }
++ }
++
++ atomic_inc(&sh->count);
++ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
++ async_trigger_callback(&submit);
++}
++
++static void mark_target_uptodate(struct stripe_head *sh, int target)
++{
++ struct r5dev *tgt;
++
++ if (target < 0)
++ return;
++
++ tgt = &sh->dev[target];
++ set_bit(R5_UPTODATE, &tgt->flags);
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++ clear_bit(R5_Wantcompute, &tgt->flags);
++}
++
++static void ops_complete_compute(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ /* mark the computed target(s) as uptodate */
++ mark_target_uptodate(sh, sh->ops.target);
++ mark_target_uptodate(sh, sh->ops.target2);
++
++ clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ if (sh->check_state == check_state_compute_run)
++ sh->check_state = check_state_compute_result;
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++/* return a pointer to the address conversion region of the scribble buffer */
++static addr_conv_t *to_addr_conv(struct stripe_head *sh,
++ struct raid5_percpu *percpu, int i)
++{
++ void *addr;
++
++ addr = flex_array_get(percpu->scribble, i);
++ return addr + sizeof(struct page *) * (sh->disks + 2);
++}
++
++/* return a pointer to the address conversion region of the scribble buffer */
++static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
++{
++ void *addr;
++
++ addr = flex_array_get(percpu->scribble, i);
++ return addr;
++}
++
++static struct dma_async_tx_descriptor *
++ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int disks = sh->disks;
++ struct page **xor_srcs = to_addr_page(percpu, 0);
++ int target = sh->ops.target;
++ struct r5dev *tgt = &sh->dev[target];
++ struct page *xor_dest = tgt->page;
++ int count = 0;
++ struct dma_async_tx_descriptor *tx;
++ struct async_submit_ctl submit;
++ int i;
++
++ BUG_ON(sh->batch_head);
++
++ pr_debug("%s: stripe %llu block: %d\n",
++ __func__, (unsigned long long)sh->sector, target);
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++
++ for (i = disks; i--; )
++ if (i != target)
++ xor_srcs[count++] = sh->dev[i].page;
++
++ atomic_inc(&sh->count);
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
++ ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
++ if (unlikely(count == 1))
++ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
++ else
++ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
++
++ return tx;
++}
++
++/* set_syndrome_sources - populate source buffers for gen_syndrome
++ * @srcs - (struct page *) array of size sh->disks
++ * @sh - stripe_head to parse
++ *
++ * Populates srcs in proper layout order for the stripe and returns the
++ * 'count' of sources to be used in a call to async_gen_syndrome. The P
++ * destination buffer is recorded in srcs[count] and the Q destination
++ * is recorded in srcs[count+1].
++ */
++static int set_syndrome_sources(struct page **srcs,
++ struct stripe_head *sh,
++ int srctype)
++{
++ int disks = sh->disks;
++ int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
++ int d0_idx = raid6_d0(sh);
++ int count;
++ int i;
++
++ for (i = 0; i < disks; i++)
++ srcs[i] = NULL;
++
++ count = 0;
++ i = d0_idx;
++ do {
++ int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
++ struct r5dev *dev = &sh->dev[i];
++
++ if (i == sh->qd_idx || i == sh->pd_idx ||
++ (srctype == SYNDROME_SRC_ALL) ||
++ (srctype == SYNDROME_SRC_WANT_DRAIN &&
++ test_bit(R5_Wantdrain, &dev->flags)) ||
++ (srctype == SYNDROME_SRC_WRITTEN &&
++ dev->written))
++ srcs[slot] = sh->dev[i].page;
++ i = raid6_next_disk(i, disks);
++ } while (i != d0_idx);
++
++ return syndrome_disks;
++}
++
++static struct dma_async_tx_descriptor *
++ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int disks = sh->disks;
++ struct page **blocks = to_addr_page(percpu, 0);
++ int target;
++ int qd_idx = sh->qd_idx;
++ struct dma_async_tx_descriptor *tx;
++ struct async_submit_ctl submit;
++ struct r5dev *tgt;
++ struct page *dest;
++ int i;
++ int count;
++
++ BUG_ON(sh->batch_head);
++ if (sh->ops.target < 0)
++ target = sh->ops.target2;
++ else if (sh->ops.target2 < 0)
++ target = sh->ops.target;
++ else
++ /* we should only have one valid target */
++ BUG();
++ BUG_ON(target < 0);
++ pr_debug("%s: stripe %llu block: %d\n",
++ __func__, (unsigned long long)sh->sector, target);
++
++ tgt = &sh->dev[target];
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++ dest = tgt->page;
++
++ atomic_inc(&sh->count);
++
++ if (target == qd_idx) {
++ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
++ blocks[count] = NULL; /* regenerating p is not necessary */
++ BUG_ON(blocks[count+1] != dest); /* q should already be set */
++ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu, 0));
++ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
++ } else {
++ /* Compute any data- or p-drive using XOR */
++ count = 0;
++ for (i = disks; i-- ; ) {
++ if (i == target || i == qd_idx)
++ continue;
++ blocks[count++] = sh->dev[i].page;
++ }
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
++ NULL, ops_complete_compute, sh,
++ to_addr_conv(sh, percpu, 0));
++ tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
++ }
++
++ return tx;
++}
++
++static struct dma_async_tx_descriptor *
++ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int i, count, disks = sh->disks;
++ int syndrome_disks = sh->ddf_layout ? disks : disks-2;
++ int d0_idx = raid6_d0(sh);
++ int faila = -1, failb = -1;
++ int target = sh->ops.target;
++ int target2 = sh->ops.target2;
++ struct r5dev *tgt = &sh->dev[target];
++ struct r5dev *tgt2 = &sh->dev[target2];
++ struct dma_async_tx_descriptor *tx;
++ struct page **blocks = to_addr_page(percpu, 0);
++ struct async_submit_ctl submit;
++
++ BUG_ON(sh->batch_head);
++ pr_debug("%s: stripe %llu block1: %d block2: %d\n",
++ __func__, (unsigned long long)sh->sector, target, target2);
++ BUG_ON(target < 0 || target2 < 0);
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
++
++ /* we need to open-code set_syndrome_sources to handle the
++ * slot number conversion for 'faila' and 'failb'
++ */
++ for (i = 0; i < disks ; i++)
++ blocks[i] = NULL;
++ count = 0;
++ i = d0_idx;
++ do {
++ int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
++
++ blocks[slot] = sh->dev[i].page;
++
++ if (i == target)
++ faila = slot;
++ if (i == target2)
++ failb = slot;
++ i = raid6_next_disk(i, disks);
++ } while (i != d0_idx);
++
++ BUG_ON(faila == failb);
++ if (failb < faila)
++ swap(faila, failb);
++ pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
++ __func__, (unsigned long long)sh->sector, faila, failb);
++
++ atomic_inc(&sh->count);
++
++ if (failb == syndrome_disks+1) {
++ /* Q disk is one of the missing disks */
++ if (faila == syndrome_disks) {
++ /* Missing P+Q, just recompute */
++ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu, 0));
++ return async_gen_syndrome(blocks, 0, syndrome_disks+2,
++ STRIPE_SIZE, &submit);
++ } else {
++ struct page *dest;
++ int data_target;
++ int qd_idx = sh->qd_idx;
++
++ /* Missing D+Q: recompute D from P, then recompute Q */
++ if (target == qd_idx)
++ data_target = target2;
++ else
++ data_target = target;
++
++ count = 0;
++ for (i = disks; i-- ; ) {
++ if (i == data_target || i == qd_idx)
++ continue;
++ blocks[count++] = sh->dev[i].page;
++ }
++ dest = sh->dev[data_target].page;
++ init_async_submit(&submit,
++ ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
++ NULL, NULL, NULL,
++ to_addr_conv(sh, percpu, 0));
++ tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
++ &submit);
++
++ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
++ init_async_submit(&submit, ASYNC_TX_FENCE, tx,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu, 0));
++ return async_gen_syndrome(blocks, 0, count+2,
++ STRIPE_SIZE, &submit);
++ }
++ } else {
++ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu, 0));
++ if (failb == syndrome_disks) {
++ /* We're missing D+P. */
++ return async_raid6_datap_recov(syndrome_disks+2,
++ STRIPE_SIZE, faila,
++ blocks, &submit);
++ } else {
++ /* We're missing D+D. */
++ return async_raid6_2data_recov(syndrome_disks+2,
++ STRIPE_SIZE, faila, failb,
++ blocks, &submit);
++ }
++ }
++}
++
++static void ops_complete_prexor(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++}
++
++static struct dma_async_tx_descriptor *
++ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ int disks = sh->disks;
++ struct page **xor_srcs = to_addr_page(percpu, 0);
++ int count = 0, pd_idx = sh->pd_idx, i;
++ struct async_submit_ctl submit;
++
++ /* existing parity data subtracted */
++ struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
++
++ BUG_ON(sh->batch_head);
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ /* Only process blocks that are known to be uptodate */
++ if (test_bit(R5_Wantdrain, &dev->flags))
++ xor_srcs[count++] = dev->page;
++ }
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
++ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
++ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
++
++ return tx;
++}
++
++static struct dma_async_tx_descriptor *
++ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ struct page **blocks = to_addr_page(percpu, 0);
++ int count;
++ struct async_submit_ctl submit;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
++ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
++ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
++
++ return tx;
++}
++
++static struct dma_async_tx_descriptor *
++ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
++{
++ int disks = sh->disks;
++ int i;
++ struct stripe_head *head_sh = sh;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev;
++ struct bio *chosen;
++
++ sh = head_sh;
++ if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
++ struct bio *wbi;
++
++again:
++ dev = &sh->dev[i];
++ spin_lock_irq(&sh->stripe_lock);
++ chosen = dev->towrite;
++ dev->towrite = NULL;
++ sh->overwrite_disks = 0;
++ BUG_ON(dev->written);
++ wbi = dev->written = chosen;
++ spin_unlock_irq(&sh->stripe_lock);
++ WARN_ON(dev->page != dev->orig_page);
++
++ while (wbi && wbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ if (wbi->bi_rw & REQ_FUA)
++ set_bit(R5_WantFUA, &dev->flags);
++ if (wbi->bi_rw & REQ_SYNC)
++ set_bit(R5_SyncIO, &dev->flags);
++ if (wbi->bi_rw & REQ_DISCARD)
++ set_bit(R5_Discard, &dev->flags);
++ else {
++ tx = async_copy_data(1, wbi, &dev->page,
++ dev->sector, tx, sh);
++ if (dev->page != dev->orig_page) {
++ set_bit(R5_SkipCopy, &dev->flags);
++ clear_bit(R5_UPTODATE, &dev->flags);
++ clear_bit(R5_OVERWRITE, &dev->flags);
++ }
++ }
++ wbi = r5_next_bio(wbi, dev->sector);
++ }
++
++ if (head_sh->batch_head) {
++ sh = list_first_entry(&sh->batch_list,
++ struct stripe_head,
++ batch_list);
++ if (sh == head_sh)
++ continue;
++ goto again;
++ }
++ }
++ }
++
++ return tx;
++}
++
++static void ops_complete_reconstruct(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++ int disks = sh->disks;
++ int pd_idx = sh->pd_idx;
++ int qd_idx = sh->qd_idx;
++ int i;
++ bool fua = false, sync = false, discard = false;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = disks; i--; ) {
++ fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
++ sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
++ discard |= test_bit(R5_Discard, &sh->dev[i].flags);
++ }
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ if (dev->written || i == pd_idx || i == qd_idx) {
++ if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
++ set_bit(R5_UPTODATE, &dev->flags);
++ if (fua)
++ set_bit(R5_WantFUA, &dev->flags);
++ if (sync)
++ set_bit(R5_SyncIO, &dev->flags);
++ }
++ }
++
++ if (sh->reconstruct_state == reconstruct_state_drain_run)
++ sh->reconstruct_state = reconstruct_state_drain_result;
++ else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
++ sh->reconstruct_state = reconstruct_state_prexor_drain_result;
++ else {
++ BUG_ON(sh->reconstruct_state != reconstruct_state_run);
++ sh->reconstruct_state = reconstruct_state_result;
++ }
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static void
++ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ int disks = sh->disks;
++ struct page **xor_srcs;
++ struct async_submit_ctl submit;
++ int count, pd_idx = sh->pd_idx, i;
++ struct page *xor_dest;
++ int prexor = 0;
++ unsigned long flags;
++ int j = 0;
++ struct stripe_head *head_sh = sh;
++ int last_stripe;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = 0; i < sh->disks; i++) {
++ if (pd_idx == i)
++ continue;
++ if (!test_bit(R5_Discard, &sh->dev[i].flags))
++ break;
++ }
++ if (i >= sh->disks) {
++ atomic_inc(&sh->count);
++ set_bit(R5_Discard, &sh->dev[pd_idx].flags);
++ ops_complete_reconstruct(sh);
++ return;
++ }
++again:
++ count = 0;
++ xor_srcs = to_addr_page(percpu, j);
++ /* check if prexor is active which means only process blocks
++ * that are part of a read-modify-write (written)
++ */
++ if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
++ prexor = 1;
++ xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (head_sh->dev[i].written)
++ xor_srcs[count++] = dev->page;
++ }
++ } else {
++ xor_dest = sh->dev[pd_idx].page;
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (i != pd_idx)
++ xor_srcs[count++] = dev->page;
++ }
++ }
++
++ /* 1/ if we prexor'd then the dest is reused as a source
++ * 2/ if we did not prexor then we are redoing the parity
++ * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
++ * for the synchronous xor case
++ */
++ last_stripe = !head_sh->batch_head ||
++ list_first_entry(&sh->batch_list,
++ struct stripe_head, batch_list) == head_sh;
++ if (last_stripe) {
++ flags = ASYNC_TX_ACK |
++ (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
++
++ atomic_inc(&head_sh->count);
++ init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
++ to_addr_conv(sh, percpu, j));
++ } else {
++ flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
++ init_async_submit(&submit, flags, tx, NULL, NULL,
++ to_addr_conv(sh, percpu, j));
++ }
++
++ if (unlikely(count == 1))
++ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
++ else
++ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
++ if (!last_stripe) {
++ j++;
++ sh = list_first_entry(&sh->batch_list, struct stripe_head,
++ batch_list);
++ goto again;
++ }
++}
++
++static void
++ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ struct async_submit_ctl submit;
++ struct page **blocks;
++ int count, i, j = 0;
++ struct stripe_head *head_sh = sh;
++ int last_stripe;
++ int synflags;
++ unsigned long txflags;
++
++ pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
++
++ for (i = 0; i < sh->disks; i++) {
++ if (sh->pd_idx == i || sh->qd_idx == i)
++ continue;
++ if (!test_bit(R5_Discard, &sh->dev[i].flags))
++ break;
++ }
++ if (i >= sh->disks) {
++ atomic_inc(&sh->count);
++ set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
++ set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
++ ops_complete_reconstruct(sh);
++ return;
++ }
++
++again:
++ blocks = to_addr_page(percpu, j);
++
++ if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
++ synflags = SYNDROME_SRC_WRITTEN;
++ txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
++ } else {
++ synflags = SYNDROME_SRC_ALL;
++ txflags = ASYNC_TX_ACK;
++ }
++
++ count = set_syndrome_sources(blocks, sh, synflags);
++ last_stripe = !head_sh->batch_head ||
++ list_first_entry(&sh->batch_list,
++ struct stripe_head, batch_list) == head_sh;
++
++ if (last_stripe) {
++ atomic_inc(&head_sh->count);
++ init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
++ head_sh, to_addr_conv(sh, percpu, j));
++ } else
++ init_async_submit(&submit, 0, tx, NULL, NULL,
++ to_addr_conv(sh, percpu, j));
++ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
++ if (!last_stripe) {
++ j++;
++ sh = list_first_entry(&sh->batch_list, struct stripe_head,
++ batch_list);
++ goto again;
++ }
++}
++
++static void ops_complete_check(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ sh->check_state = check_state_check_result;
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int disks = sh->disks;
++ int pd_idx = sh->pd_idx;
++ int qd_idx = sh->qd_idx;
++ struct page *xor_dest;
++ struct page **xor_srcs = to_addr_page(percpu, 0);
++ struct dma_async_tx_descriptor *tx;
++ struct async_submit_ctl submit;
++ int count;
++ int i;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ BUG_ON(sh->batch_head);
++ count = 0;
++ xor_dest = sh->dev[pd_idx].page;
++ xor_srcs[count++] = xor_dest;
++ for (i = disks; i--; ) {
++ if (i == pd_idx || i == qd_idx)
++ continue;
++ xor_srcs[count++] = sh->dev[i].page;
++ }
++
++ init_async_submit(&submit, 0, NULL, NULL, NULL,
++ to_addr_conv(sh, percpu, 0));
++ tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
++ &sh->ops.zero_sum_result, &submit);
++
++ atomic_inc(&sh->count);
++ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
++ tx = async_trigger_callback(&submit);
++}
++
++static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
++{
++ struct page **srcs = to_addr_page(percpu, 0);
++ struct async_submit_ctl submit;
++ int count;
++
++ pr_debug("%s: stripe %llu checkp: %d\n", __func__,
++ (unsigned long long)sh->sector, checkp);
++
++ BUG_ON(sh->batch_head);
++ count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
++ if (!checkp)
++ srcs[count] = NULL;
++
++ atomic_inc(&sh->count);
++ init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
++ sh, to_addr_conv(sh, percpu, 0));
++ async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
++ &sh->ops.zero_sum_result, percpu->spare_page, &submit);
++}
++
++static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
++{
++ int overlap_clear = 0, i, disks = sh->disks;
++ struct dma_async_tx_descriptor *tx = NULL;
++ struct r5conf *conf = sh->raid_conf;
++ int level = conf->level;
++ struct raid5_percpu *percpu;
++ unsigned long cpu;
++
++ cpu = get_cpu();
++ percpu = per_cpu_ptr(conf->percpu, cpu);
++ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
++ ops_run_biofill(sh);
++ overlap_clear++;
++ }
++
++ if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
++ if (level < 6)
++ tx = ops_run_compute5(sh, percpu);
++ else {
++ if (sh->ops.target2 < 0 || sh->ops.target < 0)
++ tx = ops_run_compute6_1(sh, percpu);
++ else
++ tx = ops_run_compute6_2(sh, percpu);
++ }
++ /* terminate the chain if reconstruct is not set to be run */
++ if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
++ async_tx_ack(tx);
++ }
++
++ if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
++ if (level < 6)
++ tx = ops_run_prexor5(sh, percpu, tx);
++ else
++ tx = ops_run_prexor6(sh, percpu, tx);
++ }
++
++ if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
++ tx = ops_run_biodrain(sh, tx);
++ overlap_clear++;
++ }
++
++ if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
++ if (level < 6)
++ ops_run_reconstruct5(sh, percpu, tx);
++ else
++ ops_run_reconstruct6(sh, percpu, tx);
++ }
++
++ if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
++ if (sh->check_state == check_state_run)
++ ops_run_check_p(sh, percpu);
++ else if (sh->check_state == check_state_run_q)
++ ops_run_check_pq(sh, percpu, 0);
++ else if (sh->check_state == check_state_run_pq)
++ ops_run_check_pq(sh, percpu, 1);
++ else
++ BUG();
++ }
++
++ if (overlap_clear && !sh->batch_head)
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (test_and_clear_bit(R5_Overlap, &dev->flags))
++ wake_up(&sh->raid_conf->wait_for_overlap);
++ }
++ put_cpu();
++}
++
++static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
++{
++ struct stripe_head *sh;
++
++ sh = kmem_cache_zalloc(sc, gfp);
++ if (sh) {
++ spin_lock_init(&sh->stripe_lock);
++ spin_lock_init(&sh->batch_lock);
++ INIT_LIST_HEAD(&sh->batch_list);
++ INIT_LIST_HEAD(&sh->lru);
++ atomic_set(&sh->count, 1);
++ }
++ return sh;
++}
++static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
++{
++ struct stripe_head *sh;
++
++ sh = alloc_stripe(conf->slab_cache, gfp);
++ if (!sh)
++ return 0;
++
++ sh->raid_conf = conf;
++
++ if (grow_buffers(sh, gfp)) {
++ shrink_buffers(sh);
++ kmem_cache_free(conf->slab_cache, sh);
++ return 0;
++ }
++ sh->hash_lock_index =
++ conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
++ /* we just created an active stripe so... */
++ atomic_inc(&conf->active_stripes);
++
++ release_stripe(sh);
++ conf->max_nr_stripes++;
++ return 1;
++}
++
++static int grow_stripes(struct r5conf *conf, int num)
++{
++ struct kmem_cache *sc;
++ int devs = max(conf->raid_disks, conf->previous_raid_disks);
++
++ if (conf->mddev->gendisk)
++ sprintf(conf->cache_name[0],
++ "raid%d-%s", conf->level, mdname(conf->mddev));
++ else
++ sprintf(conf->cache_name[0],
++ "raid%d-%p", conf->level, conf->mddev);
++ sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
++
++ conf->active_name = 0;
++ sc = kmem_cache_create(conf->cache_name[conf->active_name],
++ sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
++ 0, 0, NULL);
++ if (!sc)
++ return 1;
++ conf->slab_cache = sc;
++ conf->pool_size = devs;
++ while (num--)
++ if (!grow_one_stripe(conf, GFP_KERNEL))
++ return 1;
++
++ return 0;
++}
++
++/**
++ * scribble_len - return the required size of the scribble region
++ * @num - total number of disks in the array
++ *
++ * The size must be enough to contain:
++ * 1/ a struct page pointer for each device in the array +2
++ * 2/ room to convert each entry in (1) to its corresponding dma
++ * (dma_map_page()) or page (page_address()) address.
++ *
++ * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
++ * calculate over all devices (not just the data blocks), using zeros in place
++ * of the P and Q blocks.
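++ *
++ * For example, with num == 8 each scribble element holds ten struct page
++ * pointers followed by ten addr_conv_t entries; to_addr_conv() simply skips
++ * past the (num + 2) page pointers to reach the conversion region.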
++ */
++static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
++{
++ struct flex_array *ret;
++ size_t len;
++
++ len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
++ ret = flex_array_alloc(len, cnt, flags);
++ if (!ret)
++ return NULL;
++ /* always prealloc all elements, so no locking is required */
++ if (flex_array_prealloc(ret, 0, cnt, flags)) {
++ flex_array_free(ret);
++ return NULL;
++ }
++ return ret;
++}
++
++static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
++{
++ unsigned long cpu;
++ int err = 0;
++
++ mddev_suspend(conf->mddev);
++ get_online_cpus();
++ for_each_present_cpu(cpu) {
++ struct raid5_percpu *percpu;
++ struct flex_array *scribble;
++
++ percpu = per_cpu_ptr(conf->percpu, cpu);
++ scribble = scribble_alloc(new_disks,
++ new_sectors / STRIPE_SECTORS,
++ GFP_NOIO);
++
++ if (scribble) {
++ flex_array_free(percpu->scribble);
++ percpu->scribble = scribble;
++ } else {
++ err = -ENOMEM;
++ break;
++ }
++ }
++ put_online_cpus();
++ mddev_resume(conf->mddev);
++ return err;
++}
++
++static int resize_stripes(struct r5conf *conf, int newsize)
++{
++ /* Make all the stripes able to hold 'newsize' devices.
++ * New slots in each stripe get 'page' set to a new page.
++ *
++ * This happens in stages:
++ * 1/ create a new kmem_cache and allocate the required number of
++ * stripe_heads.
++ * 2/ gather all the old stripe_heads and transfer the pages across
++ * to the new stripe_heads. This will have the side effect of
++ * freezing the array as once all stripe_heads have been collected,
++ * no IO will be possible. Old stripe heads are freed once their
++ * pages have been transferred over, and the old kmem_cache is
++ * freed when all stripes are done.
++ * 3/ reallocate conf->disks to be suitably bigger. If this fails,
++ * we simply return a failure status - no need to clean anything up.
++ * 4/ allocate new pages for the new slots in the new stripe_heads.
++ * If this fails, we don't bother trying to shrink the
++ * stripe_heads down again, we just leave them as they are.
++ * As each stripe_head is processed the new one is released into
++ * active service.
++ *
++ * Once step2 is started, we cannot afford to wait for a write,
++ * so we use GFP_NOIO allocations.
++ */
++ struct stripe_head *osh, *nsh;
++ LIST_HEAD(newstripes);
++ struct disk_info *ndisks;
++ int err;
++ struct kmem_cache *sc;
++ int i;
++ int hash, cnt;
++
++ if (newsize <= conf->pool_size)
++ return 0; /* never bother to shrink */
++
++ err = md_allow_write(conf->mddev);
++ if (err)
++ return err;
++
++ /* Step 1 */
++ sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
++ sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
++ 0, 0, NULL);
++ if (!sc)
++ return -ENOMEM;
++
++ /* Need to ensure auto-resizing doesn't interfere */
++ mutex_lock(&conf->cache_size_mutex);
++
++ for (i = conf->max_nr_stripes; i; i--) {
++ nsh = alloc_stripe(sc, GFP_KERNEL);
++ if (!nsh)
++ break;
++
++ nsh->raid_conf = conf;
++ list_add(&nsh->lru, &newstripes);
++ }
++ if (i) {
++ /* didn't get enough, give up */
++ while (!list_empty(&newstripes)) {
++ nsh = list_entry(newstripes.next, struct stripe_head, lru);
++ list_del(&nsh->lru);
++ kmem_cache_free(sc, nsh);
++ }
++ kmem_cache_destroy(sc);
++ mutex_unlock(&conf->cache_size_mutex);
++ return -ENOMEM;
++ }
++ /* Step 2 - Must use GFP_NOIO now.
++ * OK, we have enough stripes, start collecting inactive
++ * stripes and copying them over
++ */
++ hash = 0;
++ cnt = 0;
++ list_for_each_entry(nsh, &newstripes, lru) {
++ lock_device_hash_lock(conf, hash);
++ wait_event_cmd(conf->wait_for_stripe,
++ !list_empty(conf->inactive_list + hash),
++ unlock_device_hash_lock(conf, hash),
++ lock_device_hash_lock(conf, hash));
++ osh = get_free_stripe(conf, hash);
++ unlock_device_hash_lock(conf, hash);
++
++ for(i=0; i<conf->pool_size; i++) {
++ nsh->dev[i].page = osh->dev[i].page;
++ nsh->dev[i].orig_page = osh->dev[i].page;
++ }
++ nsh->hash_lock_index = hash;
++ kmem_cache_free(conf->slab_cache, osh);
++ cnt++;
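++ /* spread the new stripes evenly over the hash buckets: the first
++ * max_nr_stripes % NR_STRIPE_HASH_LOCKS buckets each take one extra
++ * stripe before we advance to the next bucket
++ */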
++ if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
++ !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
++ hash++;
++ cnt = 0;
++ }
++ }
++ kmem_cache_destroy(conf->slab_cache);
++
++ /* Step 3.
++ * At this point, we are holding all the stripes so the array
++ * is completely stalled, so now is a good time to resize
++ * conf->disks and the scribble region
++ */
++ ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
++ if (ndisks) {
++ for (i=0; i<conf->raid_disks; i++)
++ ndisks[i] = conf->disks[i];
++ kfree(conf->disks);
++ conf->disks = ndisks;
++ } else
++ err = -ENOMEM;
++
++ mutex_unlock(&conf->cache_size_mutex);
++ /* Step 4, return new stripes to service */
++ while(!list_empty(&newstripes)) {
++ nsh = list_entry(newstripes.next, struct stripe_head, lru);
++ list_del_init(&nsh->lru);
++
++ for (i=conf->raid_disks; i < newsize; i++)
++ if (nsh->dev[i].page == NULL) {
++ struct page *p = alloc_page(GFP_NOIO);
++ nsh->dev[i].page = p;
++ nsh->dev[i].orig_page = p;
++ if (!p)
++ err = -ENOMEM;
++ }
++ release_stripe(nsh);
++ }
++ /* critical section passed, GFP_NOIO no longer needed */
++
++ conf->slab_cache = sc;
++ conf->active_name = 1-conf->active_name;
++ if (!err)
++ conf->pool_size = newsize;
++ return err;
++}
++
++static int drop_one_stripe(struct r5conf *conf)
++{
++ struct stripe_head *sh;
++ int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
++
++ spin_lock_irq(conf->hash_locks + hash);
++ sh = get_free_stripe(conf, hash);
++ spin_unlock_irq(conf->hash_locks + hash);
++ if (!sh)
++ return 0;
++ BUG_ON(atomic_read(&sh->count));
++ shrink_buffers(sh);
++ kmem_cache_free(conf->slab_cache, sh);
++ atomic_dec(&conf->active_stripes);
++ conf->max_nr_stripes--;
++ return 1;
++}
++
++static void shrink_stripes(struct r5conf *conf)
++{
++ while (conf->max_nr_stripes &&
++ drop_one_stripe(conf))
++ ;
++
++ if (conf->slab_cache)
++ kmem_cache_destroy(conf->slab_cache);
++ conf->slab_cache = NULL;
++}
++
++static void raid5_end_read_request(struct bio * bi, int error)
++{
++ struct stripe_head *sh = bi->bi_private;
++ struct r5conf *conf = sh->raid_conf;
++ int disks = sh->disks, i;
++ int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
++ char b[BDEVNAME_SIZE];
++ struct md_rdev *rdev = NULL;
++ sector_t s;
++
++ for (i=0 ; i<disks; i++)
++ if (bi == &sh->dev[i].req)
++ break;
++
++ pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
++ (unsigned long long)sh->sector, i, atomic_read(&sh->count),
++ uptodate);
++ if (i == disks) {
++ BUG();
++ return;
++ }
++ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
++ /* If replacement finished while this request was outstanding,
++ * 'replacement' might be NULL already.
++ * In that case it moved down to 'rdev'.
++ * rdev is not removed until all requests are finished.
++ */
++ rdev = conf->disks[i].replacement;
++ if (!rdev)
++ rdev = conf->disks[i].rdev;
++
++ if (use_new_offset(conf, sh))
++ s = sh->sector + rdev->new_data_offset;
++ else
++ s = sh->sector + rdev->data_offset;
++ if (uptodate) {
++ set_bit(R5_UPTODATE, &sh->dev[i].flags);
++ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
++ /* Note that this cannot happen on a
++ * replacement device. We just fail those on
++ * any error
++ */
++ printk_ratelimited(
++ KERN_INFO
++ "md/raid:%s: read error corrected"
++ " (%lu sectors at %llu on %s)\n",
++ mdname(conf->mddev), STRIPE_SECTORS,
++ (unsigned long long)s,
++ bdevname(rdev->bdev, b));
++ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
++ clear_bit(R5_ReadError, &sh->dev[i].flags);
++ clear_bit(R5_ReWrite, &sh->dev[i].flags);
++ } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
++ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
++
++ if (atomic_read(&rdev->read_errors))
++ atomic_set(&rdev->read_errors, 0);
++ } else {
++ const char *bdn = bdevname(rdev->bdev, b);
++ int retry = 0;
++ int set_bad = 0;
++
++ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
++ atomic_inc(&rdev->read_errors);
++ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
++ printk_ratelimited(
++ KERN_WARNING
++ "md/raid:%s: read error on replacement device "
++ "(sector %llu on %s).\n",
++ mdname(conf->mddev),
++ (unsigned long long)s,
++ bdn);
++ else if (conf->mddev->degraded >= conf->max_degraded) {
++ set_bad = 1;
++ printk_ratelimited(
++ KERN_WARNING
++ "md/raid:%s: read error not correctable "
++ "(sector %llu on %s).\n",
++ mdname(conf->mddev),
++ (unsigned long long)s,
++ bdn);
++ } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
++ /* Oh, no!!! */
++ set_bad = 1;
++ printk_ratelimited(
++ KERN_WARNING
++ "md/raid:%s: read error NOT corrected!! "
++ "(sector %llu on %s).\n",
++ mdname(conf->mddev),
++ (unsigned long long)s,
++ bdn);
++ } else if (atomic_read(&rdev->read_errors)
++ > conf->max_nr_stripes)
++ printk(KERN_WARNING
++ "md/raid:%s: Too many read errors, failing device %s.\n",
++ mdname(conf->mddev), bdn);
++ else
++ retry = 1;
++ if (set_bad && test_bit(In_sync, &rdev->flags)
++ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
++ retry = 1;
++ if (retry)
++ if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
++ set_bit(R5_ReadError, &sh->dev[i].flags);
++ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
++ } else
++ set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
++ else {
++ clear_bit(R5_ReadError, &sh->dev[i].flags);
++ clear_bit(R5_ReWrite, &sh->dev[i].flags);
++ if (!(set_bad
++ && test_bit(In_sync, &rdev->flags)
++ && rdev_set_badblocks(
++ rdev, sh->sector, STRIPE_SECTORS, 0)))
++ md_error(conf->mddev, rdev);
++ }
++ }
++ rdev_dec_pending(rdev, conf->mddev);
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static void raid5_end_write_request(struct bio *bi, int error)
++{
++ struct stripe_head *sh = bi->bi_private;
++ struct r5conf *conf = sh->raid_conf;
++ int disks = sh->disks, i;
++ struct md_rdev *uninitialized_var(rdev);
++ int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
++ sector_t first_bad;
++ int bad_sectors;
++ int replacement = 0;
++
++ for (i = 0 ; i < disks; i++) {
++ if (bi == &sh->dev[i].req) {
++ rdev = conf->disks[i].rdev;
++ break;
++ }
++ if (bi == &sh->dev[i].rreq) {
++ rdev = conf->disks[i].replacement;
++ if (rdev)
++ replacement = 1;
++ else
++ /* rdev was removed and 'replacement'
++ * replaced it. rdev is not removed
++ * until all requests are finished.
++ */
++ rdev = conf->disks[i].rdev;
++ break;
++ }
++ }
++ pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
++ (unsigned long long)sh->sector, i, atomic_read(&sh->count),
++ uptodate);
++ if (i == disks) {
++ BUG();
++ return;
++ }
++
++ if (replacement) {
++ if (!uptodate)
++ md_error(conf->mddev, rdev);
++ else if (is_badblock(rdev, sh->sector,
++ STRIPE_SECTORS,
++ &first_bad, &bad_sectors))
++ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
++ } else {
++ if (!uptodate) {
++ set_bit(STRIPE_DEGRADED, &sh->state);
++ set_bit(WriteErrorSeen, &rdev->flags);
++ set_bit(R5_WriteError, &sh->dev[i].flags);
++ if (!test_and_set_bit(WantReplacement, &rdev->flags))
++ set_bit(MD_RECOVERY_NEEDED,
++ &rdev->mddev->recovery);
++ } else if (is_badblock(rdev, sh->sector,
++ STRIPE_SECTORS,
++ &first_bad, &bad_sectors)) {
++ set_bit(R5_MadeGood, &sh->dev[i].flags);
++ if (test_bit(R5_ReadError, &sh->dev[i].flags))
++ /* That was a successful write so make
++ * sure it looks like we already did
++ * a re-write.
++ */
++ set_bit(R5_ReWrite, &sh->dev[i].flags);
++ }
++ }
++ rdev_dec_pending(rdev, conf->mddev);
++
++ if (sh->batch_head && !uptodate && !replacement)
++ set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
++
++ if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++
++ if (sh->batch_head && sh != sh->batch_head)
++ release_stripe(sh->batch_head);
++}
++
++static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
++
++static void raid5_build_block(struct stripe_head *sh, int i, int previous)
++{
++ struct r5dev *dev = &sh->dev[i];
++
++ bio_init(&dev->req);
++ dev->req.bi_io_vec = &dev->vec;
++ dev->req.bi_max_vecs = 1;
++ dev->req.bi_private = sh;
++
++ bio_init(&dev->rreq);
++ dev->rreq.bi_io_vec = &dev->rvec;
++ dev->rreq.bi_max_vecs = 1;
++ dev->rreq.bi_private = sh;
++
++ dev->flags = 0;
++ dev->sector = compute_blocknr(sh, i, previous);
++}
++
++static void error(struct mddev *mddev, struct md_rdev *rdev)
++{
++ char b[BDEVNAME_SIZE];
++ struct r5conf *conf = mddev->private;
++ unsigned long flags;
++ pr_debug("raid456: error called\n");
++
++ spin_lock_irqsave(&conf->device_lock, flags);
++ clear_bit(In_sync, &rdev->flags);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
++
++ set_bit(Blocked, &rdev->flags);
++ set_bit(Faulty, &rdev->flags);
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ printk(KERN_ALERT
++ "md/raid:%s: Disk failure on %s, disabling device.\n"
++ "md/raid:%s: Operation continuing on %d devices.\n",
++ mdname(mddev),
++ bdevname(rdev->bdev, b),
++ mdname(mddev),
++ conf->raid_disks - mddev->degraded);
++}
++
++/*
++ * Input: a 'big' sector number,
++ * Output: index of the data and parity disk, and the sector # in them.
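++ *
++ * For example, with 4 data disks and 128-sector chunks, r_sector 1000 gives
++ * chunk_offset 104, chunk_number 7, *dd_idx 3 and stripe 1, so the returned
++ * new_sector is 1 * 128 + 104 = 232; pd_idx/qd_idx then depend on the
++ * selected algorithm.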
++ */
++static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
++ int previous, int *dd_idx,
++ struct stripe_head *sh)
++{
++ sector_t stripe, stripe2;
++ sector_t chunk_number;
++ unsigned int chunk_offset;
++ int pd_idx, qd_idx;
++ int ddf_layout = 0;
++ sector_t new_sector;
++ int algorithm = previous ? conf->prev_algo
++ : conf->algorithm;
++ int sectors_per_chunk = previous ? conf->prev_chunk_sectors
++ : conf->chunk_sectors;
++ int raid_disks = previous ? conf->previous_raid_disks
++ : conf->raid_disks;
++ int data_disks = raid_disks - conf->max_degraded;
++
++ /* First compute the information on this sector */
++
++ /*
++ * Compute the chunk number and the sector offset inside the chunk
++ */
++ chunk_offset = sector_div(r_sector, sectors_per_chunk);
++ chunk_number = r_sector;
++
++ /*
++ * Compute the stripe number
++ */
++ stripe = chunk_number;
++ *dd_idx = sector_div(stripe, data_disks);
++ stripe2 = stripe;
++ /*
++ * Select the parity disk based on the user selected algorithm.
++ */
++ pd_idx = qd_idx = -1;
++ switch(conf->level) {
++ case 4:
++ pd_idx = data_disks;
++ break;
++ case 5:
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ pd_idx = data_disks - sector_div(stripe2, raid_disks);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ pd_idx = data_disks - sector_div(stripe2, raid_disks);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
++ break;
++ case ALGORITHM_PARITY_0:
++ pd_idx = 0;
++ (*dd_idx)++;
++ break;
++ case ALGORITHM_PARITY_N:
++ pd_idx = data_disks;
++ break;
++ default:
++ BUG();
++ }
++ break;
++ case 6:
++
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = (pd_idx + 1) % raid_disks;
++ *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ qd_idx = (pd_idx + 1) % raid_disks;
++ *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
++ break;
++
++ case ALGORITHM_PARITY_0:
++ pd_idx = 0;
++ qd_idx = 1;
++ (*dd_idx) += 2;
++ break;
++ case ALGORITHM_PARITY_N:
++ pd_idx = data_disks;
++ qd_idx = data_disks + 1;
++ break;
++
++ case ALGORITHM_ROTATING_ZERO_RESTART:
++ /* Exactly the same as RIGHT_ASYMMETRIC, but the order
++ * of blocks for computing Q is different.
++ */
++ pd_idx = sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ ddf_layout = 1;
++ break;
++
++ case ALGORITHM_ROTATING_N_RESTART:
++ /* Same as left_asymmetric, but the first stripe is
++ * D D D P Q rather than
++ * Q D D D P
++ */
++ stripe2 += 1;
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ ddf_layout = 1;
++ break;
++
++ case ALGORITHM_ROTATING_N_CONTINUE:
++ /* Same as left_symmetric but Q is before P */
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
++ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
++ ddf_layout = 1;
++ break;
++
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ /* RAID5 left_asymmetric, with Q on last device */
++ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ pd_idx = sector_div(stripe2, raid_disks-1);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ pd_idx = sector_div(stripe2, raid_disks-1);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_PARITY_0_6:
++ pd_idx = 0;
++ (*dd_idx)++;
++ qd_idx = raid_disks - 1;
++ break;
++
++ default:
++ BUG();
++ }
++ break;
++ }
++
++ if (sh) {
++ sh->pd_idx = pd_idx;
++ sh->qd_idx = qd_idx;
++ sh->ddf_layout = ddf_layout;
++ }
++ /*
++ * Finally, compute the new sector number
++ */
++ new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
++ return new_sector;
++}
++
++static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int raid_disks = sh->disks;
++ int data_disks = raid_disks - conf->max_degraded;
++ sector_t new_sector = sh->sector, check;
++ int sectors_per_chunk = previous ? conf->prev_chunk_sectors
++ : conf->chunk_sectors;
++ int algorithm = previous ? conf->prev_algo
++ : conf->algorithm;
++ sector_t stripe;
++ int chunk_offset;
++ sector_t chunk_number;
++ int dummy1, dd_idx = i;
++ sector_t r_sector;
++ struct stripe_head sh2;
++
++ chunk_offset = sector_div(new_sector, sectors_per_chunk);
++ stripe = new_sector;
++
++ if (i == sh->pd_idx)
++ return 0;
++ switch(conf->level) {
++ case 4: break;
++ case 5:
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ if (i > sh->pd_idx)
++ i--;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ if (i < sh->pd_idx)
++ i += raid_disks;
++ i -= (sh->pd_idx + 1);
++ break;
++ case ALGORITHM_PARITY_0:
++ i -= 1;
++ break;
++ case ALGORITHM_PARITY_N:
++ break;
++ default:
++ BUG();
++ }
++ break;
++ case 6:
++ if (i == sh->qd_idx)
++ return 0; /* It is the Q disk */
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ case ALGORITHM_ROTATING_ZERO_RESTART:
++ case ALGORITHM_ROTATING_N_RESTART:
++ if (sh->pd_idx == raid_disks-1)
++ i--; /* Q D D D P */
++ else if (i > sh->pd_idx)
++ i -= 2; /* D D P Q D */
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ if (sh->pd_idx == raid_disks-1)
++ i--; /* Q D D D P */
++ else {
++ /* D D P Q D */
++ if (i < sh->pd_idx)
++ i += raid_disks;
++ i -= (sh->pd_idx + 2);
++ }
++ break;
++ case ALGORITHM_PARITY_0:
++ i -= 2;
++ break;
++ case ALGORITHM_PARITY_N:
++ break;
++ case ALGORITHM_ROTATING_N_CONTINUE:
++ /* Like left_symmetric, but P is before Q */
++ if (sh->pd_idx == 0)
++ i--; /* P D D D Q */
++ else {
++ /* D D Q P D */
++ if (i < sh->pd_idx)
++ i += raid_disks;
++ i -= (sh->pd_idx + 1);
++ }
++ break;
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ if (i > sh->pd_idx)
++ i--;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ if (i < sh->pd_idx)
++ i += data_disks + 1;
++ i -= (sh->pd_idx + 1);
++ break;
++ case ALGORITHM_PARITY_0_6:
++ i -= 1;
++ break;
++ default:
++ BUG();
++ }
++ break;
++ }
++
++ chunk_number = stripe * data_disks + i;
++ r_sector = chunk_number * sectors_per_chunk + chunk_offset;
++
++ check = raid5_compute_sector(conf, r_sector,
++ previous, &dummy1, &sh2);
++ if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
++ || sh2.qd_idx != sh->qd_idx) {
++ printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
++ mdname(conf->mddev));
++ return 0;
++ }
++ return r_sector;
++}
++
++static void
++schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
++ int rcw, int expand)
++{
++ int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
++ struct r5conf *conf = sh->raid_conf;
++ int level = conf->level;
++
++ if (rcw) {
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ if (dev->towrite) {
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantdrain, &dev->flags);
++ if (!expand)
++ clear_bit(R5_UPTODATE, &dev->flags);
++ s->locked++;
++ }
++ }
++ /* if we are not expanding this is a proper write request, and
++ * there will be bios with new data to be drained into the
++ * stripe cache
++ */
++ if (!expand) {
++ if (!s->locked)
++ /* False alarm, nothing to do */
++ return;
++ sh->reconstruct_state = reconstruct_state_drain_run;
++ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
++ } else
++ sh->reconstruct_state = reconstruct_state_run;
++
++ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
++
++ if (s->locked + conf->max_degraded == disks)
++ if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
++ atomic_inc(&conf->pending_full_writes);
++ } else {
++ BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
++ test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
++ BUG_ON(level == 6 &&
++ (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
++ test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (i == pd_idx || i == qd_idx)
++ continue;
++
++ if (dev->towrite &&
++ (test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ set_bit(R5_Wantdrain, &dev->flags);
++ set_bit(R5_LOCKED, &dev->flags);
++ clear_bit(R5_UPTODATE, &dev->flags);
++ s->locked++;
++ }
++ }
++ if (!s->locked)
++ /* False alarm - nothing to do */
++ return;
++ sh->reconstruct_state = reconstruct_state_prexor_drain_run;
++ set_bit(STRIPE_OP_PREXOR, &s->ops_request);
++ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
++ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
++ }
++
++ /* keep the parity disk(s) locked while asynchronous operations
++ * are in flight
++ */
++ set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
++ clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
++ s->locked++;
++
++ if (level == 6) {
++ int qd_idx = sh->qd_idx;
++ struct r5dev *dev = &sh->dev[qd_idx];
++
++ set_bit(R5_LOCKED, &dev->flags);
++ clear_bit(R5_UPTODATE, &dev->flags);
++ s->locked++;
++ }
++
++ pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
++ __func__, (unsigned long long)sh->sector,
++ s->locked, s->ops_request);
++}
++
++/*
++ * Each stripe/dev can have one or more bion attached.
++ * toread/towrite point to the first in a chain.
++ * The bi_next chain must be in order.
++ */
++static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
++ int forwrite, int previous)
++{
++ struct bio **bip;
++ struct r5conf *conf = sh->raid_conf;
++ int firstwrite=0;
++
++ pr_debug("adding bi b#%llu to stripe s#%llu\n",
++ (unsigned long long)bi->bi_iter.bi_sector,
++ (unsigned long long)sh->sector);
++
++ /*
++ * If several bios share a stripe, the bio bi_phys_segments acts as a
++ * reference count to avoid races. The reference count should already be
++ * increased before this function is called (for example, in
++ * make_request()), so other bio sharing this stripe will not free the
++ * stripe. If a stripe is owned by one stripe, the stripe lock will
++ * protect it.
++ */
++ spin_lock_irq(&sh->stripe_lock);
++ /* Don't allow new IO added to stripes in batch list */
++ if (sh->batch_head)
++ goto overlap;
++ if (forwrite) {
++ bip = &sh->dev[dd_idx].towrite;
++ if (*bip == NULL)
++ firstwrite = 1;
++ } else
++ bip = &sh->dev[dd_idx].toread;
++ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
++ if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
++ goto overlap;
++ bip = & (*bip)->bi_next;
++ }
++ if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
++ goto overlap;
++
++ if (!forwrite || previous)
++ clear_bit(STRIPE_BATCH_READY, &sh->state);
++
++ BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
++ if (*bip)
++ bi->bi_next = *bip;
++ *bip = bi;
++ raid5_inc_bi_active_stripes(bi);
++
++ if (forwrite) {
++ /* check if page is covered */
++ sector_t sector = sh->dev[dd_idx].sector;
++ for (bi=sh->dev[dd_idx].towrite;
++ sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
++ bi && bi->bi_iter.bi_sector <= sector;
++ bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
++ if (bio_end_sector(bi) >= sector)
++ sector = bio_end_sector(bi);
++ }
++ if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
++ if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
++ sh->overwrite_disks++;
++ }
++
++ pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
++ (unsigned long long)(*bip)->bi_iter.bi_sector,
++ (unsigned long long)sh->sector, dd_idx);
++
++ if (conf->mddev->bitmap && firstwrite) {
++ /* Cannot hold spinlock over bitmap_startwrite,
++ * but must ensure this isn't added to a batch until
++ * we have added to the bitmap and set bm_seq.
++ * So set STRIPE_BITMAP_PENDING to prevent
++ * batching.
++ * If multiple add_stripe_bio() calls race here they
++ * must all set STRIPE_BITMAP_PENDING. So only the first one
++ * to complete "bitmap_startwrite" gets to set
++ * STRIPE_BIT_DELAY. This is important as once a stripe
++ * is added to a batch, STRIPE_BIT_DELAY cannot be changed
++ * any more.
++ */
++ set_bit(STRIPE_BITMAP_PENDING, &sh->state);
++ spin_unlock_irq(&sh->stripe_lock);
++ bitmap_startwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS, 0);
++ spin_lock_irq(&sh->stripe_lock);
++ clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
++ if (!sh->batch_head) {
++ sh->bm_seq = conf->seq_flush+1;
++ set_bit(STRIPE_BIT_DELAY, &sh->state);
++ }
++ }
++ spin_unlock_irq(&sh->stripe_lock);
++
++ if (stripe_can_batch(sh))
++ stripe_add_to_batch_list(conf, sh);
++ return 1;
++
++ overlap:
++ set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
++ spin_unlock_irq(&sh->stripe_lock);
++ return 0;
++}
++
++static void end_reshape(struct r5conf *conf);
++
++static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
++ struct stripe_head *sh)
++{
++ int sectors_per_chunk =
++ previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
++ int dd_idx;
++ int chunk_offset = sector_div(stripe, sectors_per_chunk);
++ int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
++
++ raid5_compute_sector(conf,
++ stripe * (disks - conf->max_degraded)
++ *sectors_per_chunk + chunk_offset,
++ previous,
++ &dd_idx, sh);
++}
++
++static void
++handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s, int disks,
++ struct bio **return_bi)
++{
++ int i;
++ BUG_ON(sh->batch_head);
++ for (i = disks; i--; ) {
++ struct bio *bi;
++ int bitmap_end = 0;
++
++ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
++ struct md_rdev *rdev;
++ rcu_read_lock();
++ rdev = rcu_dereference(conf->disks[i].rdev);
++ if (rdev && test_bit(In_sync, &rdev->flags))
++ atomic_inc(&rdev->nr_pending);
++ else
++ rdev = NULL;
++ rcu_read_unlock();
++ if (rdev) {
++ if (!rdev_set_badblocks(
++ rdev,
++ sh->sector,
++ STRIPE_SECTORS, 0))
++ md_error(conf->mddev, rdev);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ }
++ spin_lock_irq(&sh->stripe_lock);
++ /* fail all writes first */
++ bi = sh->dev[i].towrite;
++ sh->dev[i].towrite = NULL;
++ sh->overwrite_disks = 0;
++ spin_unlock_irq(&sh->stripe_lock);
++ if (bi)
++ bitmap_end = 1;
++
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
++ wake_up(&conf->wait_for_overlap);
++
++ while (bi && bi->bi_iter.bi_sector <
++ sh->dev[i].sector + STRIPE_SECTORS) {
++ struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ if (!raid5_dec_bi_active_stripes(bi)) {
++ md_write_end(conf->mddev);
++ bi->bi_next = *return_bi;
++ *return_bi = bi;
++ }
++ bi = nextbi;
++ }
++ if (bitmap_end)
++ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS, 0, 0);
++ bitmap_end = 0;
++ /* and fail all 'written' */
++ bi = sh->dev[i].written;
++ sh->dev[i].written = NULL;
++ if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
++ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ sh->dev[i].page = sh->dev[i].orig_page;
++ }
++
++ if (bi) bitmap_end = 1;
++ while (bi && bi->bi_iter.bi_sector <
++ sh->dev[i].sector + STRIPE_SECTORS) {
++ struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ if (!raid5_dec_bi_active_stripes(bi)) {
++ md_write_end(conf->mddev);
++ bi->bi_next = *return_bi;
++ *return_bi = bi;
++ }
++ bi = bi2;
++ }
++
++ /* fail any reads if this device is non-operational and
++ * the data has not reached the cache yet.
++ */
++ if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
++ (!test_bit(R5_Insync, &sh->dev[i].flags) ||
++ test_bit(R5_ReadError, &sh->dev[i].flags))) {
++ spin_lock_irq(&sh->stripe_lock);
++ bi = sh->dev[i].toread;
++ sh->dev[i].toread = NULL;
++ spin_unlock_irq(&sh->stripe_lock);
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
++ wake_up(&conf->wait_for_overlap);
++ while (bi && bi->bi_iter.bi_sector <
++ sh->dev[i].sector + STRIPE_SECTORS) {
++ struct bio *nextbi =
++ r5_next_bio(bi, sh->dev[i].sector);
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ if (!raid5_dec_bi_active_stripes(bi)) {
++ bi->bi_next = *return_bi;
++ *return_bi = bi;
++ }
++ bi = nextbi;
++ }
++ }
++ if (bitmap_end)
++ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS, 0, 0);
++ /* If we were in the middle of a write the parity block might
++ * still be locked - so just clear all R5_LOCKED flags
++ */
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ }
++
++ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
++ if (atomic_dec_and_test(&conf->pending_full_writes))
++ md_wakeup_thread(conf->mddev->thread);
++}
++
++static void
++handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s)
++{
++ int abort = 0;
++ int i;
++
++ BUG_ON(sh->batch_head);
++ clear_bit(STRIPE_SYNCING, &sh->state);
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
++ wake_up(&conf->wait_for_overlap);
++ s->syncing = 0;
++ s->replacing = 0;
++ /* There is nothing more to do for sync/check/repair.
++ * Don't even need to abort as that is handled elsewhere
++ * if needed, and not always wanted e.g. if there is a known
++ * bad block here.
++ * For recover/replace we need to record a bad block on all
++ * non-sync devices, or abort the recovery
++ */
++ if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
++ /* During recovery devices cannot be removed, so
++ * locking and refcounting of rdevs is not needed
++ */
++ for (i = 0; i < conf->raid_disks; i++) {
++ struct md_rdev *rdev = conf->disks[i].rdev;
++ if (rdev
++ && !test_bit(Faulty, &rdev->flags)
++ && !test_bit(In_sync, &rdev->flags)
++ && !rdev_set_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0))
++ abort = 1;
++ rdev = conf->disks[i].replacement;
++ if (rdev
++ && !test_bit(Faulty, &rdev->flags)
++ && !test_bit(In_sync, &rdev->flags)
++ && !rdev_set_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0))
++ abort = 1;
++ }
++ if (abort)
++ conf->recovery_disabled =
++ conf->mddev->recovery_disabled;
++ }
++ md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
++}
++
++static int want_replace(struct stripe_head *sh, int disk_idx)
++{
++ struct md_rdev *rdev;
++ int rv = 0;
++ /* Doing recovery so rcu locking not required */
++ rdev = sh->raid_conf->disks[disk_idx].replacement;
++ if (rdev
++ && !test_bit(Faulty, &rdev->flags)
++ && !test_bit(In_sync, &rdev->flags)
++ && (rdev->recovery_offset <= sh->sector
++ || rdev->mddev->recovery_cp <= sh->sector))
++ rv = 1;
++
++ return rv;
++}
++
++/* fetch_block - checks the given member device to see if its data needs
++ * to be read or computed to satisfy a request.
++ *
++ * Returns 1 when no more member devices need to be checked, otherwise returns
++ * 0 to tell the loop in handle_stripe_fill to continue
++ */
++
++static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
++ int disk_idx, int disks)
++{
++ struct r5dev *dev = &sh->dev[disk_idx];
++ struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
++ &sh->dev[s->failed_num[1]] };
++ int i;
++
++
++ if (test_bit(R5_LOCKED, &dev->flags) ||
++ test_bit(R5_UPTODATE, &dev->flags))
++ /* No point reading this as we already have it or have
++ * decided to get it.
++ */
++ return 0;
++
++ if (dev->toread ||
++ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)))
++ /* We need this block to directly satisfy a request */
++ return 1;
++
++ if (s->syncing || s->expanding ||
++ (s->replacing && want_replace(sh, disk_idx)))
++ /* When syncing or expanding, we read everything.
++ * When replacing, we need the replaced block.
++ */
++ return 1;
++
++ if ((s->failed >= 1 && fdev[0]->toread) ||
++ (s->failed >= 2 && fdev[1]->toread))
++ /* If we want to read from a failed device, then
++ * we need to actually read every other device.
++ */
++ return 1;
++
++ /* Sometimes neither read-modify-write nor reconstruct-write
++ * cycles can work. In those cases we read every block we
++ * can. Then the parity-update is certain to have enough to
++ * work with.
++ * This can only be a problem when we need to write something,
++ * and some device has failed. If either of those tests
++ * fail we need look no further.
++ */
++ if (!s->failed || !s->to_write)
++ return 0;
++
++ if (test_bit(R5_Insync, &dev->flags) &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ /* Pre-reads are not permitted until after a short delay
++ * to gather multiple requests. However if this
++ * device is not Insync, the block could only be computed
++ * and there is no need to delay that.
++ */
++ return 0;
++
++ for (i = 0; i < s->failed; i++) {
++ if (fdev[i]->towrite &&
++ !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
++ !test_bit(R5_OVERWRITE, &fdev[i]->flags))
++ /* If we have a partial write to a failed
++ * device, then we will need to reconstruct
++ * the content of that device, so all other
++ * devices must be read.
++ */
++ return 1;
++ }
++
++ /* If we are forced to do a reconstruct-write, either because
++ * the current RAID6 implementation only supports that, or
++ * because parity cannot be trusted and we are currently
++ * recovering it, there is extra need to be careful.
++ * If one of the devices that we would need to read, because
++ * it is not being overwritten (and maybe not written at all)
++ * is missing/faulty, then we need to read everything we can.
++ */
++ if (sh->raid_conf->level != 6 &&
++ sh->sector < sh->raid_conf->mddev->recovery_cp)
++ /* reconstruct-write isn't being forced */
++ return 0;
++ for (i = 0; i < s->failed; i++) {
++ if (s->failed_num[i] != sh->pd_idx &&
++ s->failed_num[i] != sh->qd_idx &&
++ !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
++ !test_bit(R5_OVERWRITE, &fdev[i]->flags))
++ return 1;
++ }
++
++ return 0;
++}
++
++static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
++ int disk_idx, int disks)
++{
++ struct r5dev *dev = &sh->dev[disk_idx];
++
++ /* is the data in this block needed, and can we get it? */
++ if (need_this_block(sh, s, disk_idx, disks)) {
++ /* we would like to get this block, possibly by computing it,
++ * otherwise read it if the backing disk is insync
++ */
++ BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
++ BUG_ON(test_bit(R5_Wantread, &dev->flags));
++ BUG_ON(sh->batch_head);
++ if ((s->uptodate == disks - 1) &&
++ (s->failed && (disk_idx == s->failed_num[0] ||
++ disk_idx == s->failed_num[1]))) {
++			/* the disk has failed and we're requested to fetch it;
++			 * so compute it instead
++ */
++ pr_debug("Computing stripe %llu block %d\n",
++ (unsigned long long)sh->sector, disk_idx);
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ set_bit(R5_Wantcompute, &dev->flags);
++ sh->ops.target = disk_idx;
++ sh->ops.target2 = -1; /* no 2nd target */
++ s->req_compute = 1;
++ /* Careful: from this point on 'uptodate' is in the eye
++ * of raid_run_ops which services 'compute' operations
++ * before writes. R5_Wantcompute flags a block that will
++ * be R5_UPTODATE by the time it is needed for a
++ * subsequent operation.
++ */
++ s->uptodate++;
++ return 1;
++ } else if (s->uptodate == disks-2 && s->failed >= 2) {
++ /* Computing 2-failure is *very* expensive; only
++ * do it if failed >= 2
++ */
++ int other;
++ for (other = disks; other--; ) {
++ if (other == disk_idx)
++ continue;
++ if (!test_bit(R5_UPTODATE,
++ &sh->dev[other].flags))
++ break;
++ }
++ BUG_ON(other < 0);
++ pr_debug("Computing stripe %llu blocks %d,%d\n",
++ (unsigned long long)sh->sector,
++ disk_idx, other);
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
++ set_bit(R5_Wantcompute, &sh->dev[other].flags);
++ sh->ops.target = disk_idx;
++ sh->ops.target2 = other;
++ s->uptodate += 2;
++ s->req_compute = 1;
++ return 1;
++ } else if (test_bit(R5_Insync, &dev->flags)) {
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantread, &dev->flags);
++ s->locked++;
++ pr_debug("Reading block %d (sync=%d)\n",
++ disk_idx, s->syncing);
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * handle_stripe_fill - read or compute data to satisfy pending requests.
++ */
++static void handle_stripe_fill(struct stripe_head *sh,
++ struct stripe_head_state *s,
++ int disks)
++{
++ int i;
++
++ /* look for blocks to read/compute, skip this if a compute
++ * is already in flight, or if the stripe contents are in the
++ * midst of changing due to a write
++ */
++ if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
++ !sh->reconstruct_state)
++ for (i = disks; i--; )
++ if (fetch_block(sh, s, i, disks))
++ break;
++ set_bit(STRIPE_HANDLE, &sh->state);
++}
++
++static void break_stripe_batch_list(struct stripe_head *head_sh,
++ unsigned long handle_flags);
++/* handle_stripe_clean_event
++ * any written block on an uptodate or failed drive can be returned.
++ * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
++ * never LOCKED, so we don't need to test 'failed' directly.
++ */
++static void handle_stripe_clean_event(struct r5conf *conf,
++ struct stripe_head *sh, int disks, struct bio **return_bi)
++{
++ int i;
++ struct r5dev *dev;
++ int discard_pending = 0;
++ struct stripe_head *head_sh = sh;
++ bool do_endio = false;
++
++ for (i = disks; i--; )
++ if (sh->dev[i].written) {
++ dev = &sh->dev[i];
++ if (!test_bit(R5_LOCKED, &dev->flags) &&
++ (test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Discard, &dev->flags) ||
++ test_bit(R5_SkipCopy, &dev->flags))) {
++ /* We can return any write requests */
++ struct bio *wbi, *wbi2;
++ pr_debug("Return write for disc %d\n", i);
++ if (test_and_clear_bit(R5_Discard, &dev->flags))
++ clear_bit(R5_UPTODATE, &dev->flags);
++ if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
++ WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
++ }
++ do_endio = true;
++
++returnbi:
++ dev->page = dev->orig_page;
++ wbi = dev->written;
++ dev->written = NULL;
++ while (wbi && wbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ wbi2 = r5_next_bio(wbi, dev->sector);
++ if (!raid5_dec_bi_active_stripes(wbi)) {
++ md_write_end(conf->mddev);
++ wbi->bi_next = *return_bi;
++ *return_bi = wbi;
++ }
++ wbi = wbi2;
++ }
++ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS,
++ !test_bit(STRIPE_DEGRADED, &sh->state),
++ 0);
++ if (head_sh->batch_head) {
++ sh = list_first_entry(&sh->batch_list,
++ struct stripe_head,
++ batch_list);
++ if (sh != head_sh) {
++ dev = &sh->dev[i];
++ goto returnbi;
++ }
++ }
++ sh = head_sh;
++ dev = &sh->dev[i];
++ } else if (test_bit(R5_Discard, &dev->flags))
++ discard_pending = 1;
++ WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
++ WARN_ON(dev->page != dev->orig_page);
++ }
++ if (!discard_pending &&
++ test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
++ clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
++ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
++ if (sh->qd_idx >= 0) {
++ clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
++ clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
++ }
++ /* now that discard is done we can proceed with any sync */
++ clear_bit(STRIPE_DISCARD, &sh->state);
++ /*
++ * SCSI discard will change some bio fields and the stripe has
++ * no updated data, so remove it from hash list and the stripe
++ * will be reinitialized
++ */
++ spin_lock_irq(&conf->device_lock);
++unhash:
++ remove_hash(sh);
++ if (head_sh->batch_head) {
++ sh = list_first_entry(&sh->batch_list,
++ struct stripe_head, batch_list);
++ if (sh != head_sh)
++ goto unhash;
++ }
++ spin_unlock_irq(&conf->device_lock);
++ sh = head_sh;
++
++ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ }
++
++ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
++ if (atomic_dec_and_test(&conf->pending_full_writes))
++ md_wakeup_thread(conf->mddev->thread);
++
++ if (head_sh->batch_head && do_endio)
++ break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
++}
++
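++/*
++ * handle_stripe_dirtying - decide between read-modify-write and
++ * reconstruct-write for a stripe with pending writes, schedule the
++ * pre-reads the cheaper option needs (or delay the stripe), and start
++ * the write once enough data is available and nothing is locked.
++ */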
++static void handle_stripe_dirtying(struct r5conf *conf,
++ struct stripe_head *sh,
++ struct stripe_head_state *s,
++ int disks)
++{
++ int rmw = 0, rcw = 0, i;
++ sector_t recovery_cp = conf->mddev->recovery_cp;
++
++ /* Check whether resync is now happening or should start.
++ * If yes, then the array is dirty (after unclean shutdown or
++ * initial creation), so parity in some stripes might be inconsistent.
++ * In this case, we need to always do reconstruct-write, to ensure
++ * that in case of drive failure or read-error correction, we
++ * generate correct data from the parity.
++ */
++ if (conf->rmw_level == PARITY_DISABLE_RMW ||
++ (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
++ s->failed == 0)) {
++ /* Calculate the real rcw later - for now make it
++ * look like rcw is cheaper
++ */
++ rcw = 1; rmw = 2;
++ pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
++ conf->rmw_level, (unsigned long long)recovery_cp,
++ (unsigned long long)sh->sector);
++ } else for (i = disks; i--; ) {
++ /* would I have to read this buffer for read_modify_write */
++ struct r5dev *dev = &sh->dev[i];
++ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ if (test_bit(R5_Insync, &dev->flags))
++ rmw++;
++ else
++ rmw += 2*disks; /* cannot read it */
++ }
++ /* Would I have to read this buffer for reconstruct_write */
++ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
++ i != sh->pd_idx && i != sh->qd_idx &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ if (test_bit(R5_Insync, &dev->flags))
++ rcw++;
++ else
++ rcw += 2*disks;
++ }
++ }
++ pr_debug("for sector %llu, rmw=%d rcw=%d\n",
++ (unsigned long long)sh->sector, rmw, rcw);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
++ /* prefer read-modify-write, but need to get some data */
++ if (conf->mddev->queue)
++ blk_add_trace_msg(conf->mddev->queue,
++ "raid5 rmw %llu %d",
++ (unsigned long long)sh->sector, rmw);
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags)) &&
++ test_bit(R5_Insync, &dev->flags)) {
++ if (test_bit(STRIPE_PREREAD_ACTIVE,
++ &sh->state)) {
++ pr_debug("Read_old block %d for r-m-w\n",
++ i);
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantread, &dev->flags);
++ s->locked++;
++ } else {
++ set_bit(STRIPE_DELAYED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ }
++ }
++ }
++ if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
++ /* want reconstruct write, but need to get some data */
++		int qread = 0;
++ rcw = 0;
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
++ i != sh->pd_idx && i != sh->qd_idx &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ rcw++;
++ if (test_bit(R5_Insync, &dev->flags) &&
++ test_bit(STRIPE_PREREAD_ACTIVE,
++ &sh->state)) {
++ pr_debug("Read_old block "
++ "%d for Reconstruct\n", i);
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantread, &dev->flags);
++ s->locked++;
++ qread++;
++ } else {
++ set_bit(STRIPE_DELAYED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ }
++ }
++ if (rcw && conf->mddev->queue)
++ blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
++ (unsigned long long)sh->sector,
++ rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
++ }
++
++ if (rcw > disks && rmw > disks &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ set_bit(STRIPE_DELAYED, &sh->state);
++
++ /* now if nothing is locked, and if we have enough data,
++ * we can start a write request
++ */
++ /* since handle_stripe can be called at any time we need to handle the
++ * case where a compute block operation has been submitted and then a
++ * subsequent call wants to start a write request. raid_run_ops only
++ * handles the case where compute block and reconstruct are requested
++ * simultaneously. If this is not the case then new writes need to be
++ * held off until the compute completes.
++ */
++ if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
++ (s->locked == 0 && (rcw == 0 || rmw == 0) &&
++ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
++ schedule_reconstruction(sh, s, rcw == 0, 0);
++}
++
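++/*
++ * handle_parity_checks5 - parity check/repair state machine for RAID4/5.
++ * When idle and the stripe is fully uptodate, start an xor check of the
++ * parity block; on a mismatch either just record it (MD_RECOVERY_CHECK)
++ * or recompute the parity and write it back.
++ */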
++static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s, int disks)
++{
++ struct r5dev *dev = NULL;
++
++ BUG_ON(sh->batch_head);
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ switch (sh->check_state) {
++ case check_state_idle:
++ /* start a new check operation if there are no failures */
++ if (s->failed == 0) {
++ BUG_ON(s->uptodate != disks);
++ sh->check_state = check_state_run;
++ set_bit(STRIPE_OP_CHECK, &s->ops_request);
++ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
++ s->uptodate--;
++ break;
++ }
++ dev = &sh->dev[s->failed_num[0]];
++ /* fall through */
++ case check_state_compute_result:
++ sh->check_state = check_state_idle;
++ if (!dev)
++ dev = &sh->dev[sh->pd_idx];
++
++ /* check that a write has not made the stripe insync */
++ if (test_bit(STRIPE_INSYNC, &sh->state))
++ break;
++
++ /* either failed parity check, or recovery is happening */
++ BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
++ BUG_ON(s->uptodate != disks);
++
++ set_bit(R5_LOCKED, &dev->flags);
++ s->locked++;
++ set_bit(R5_Wantwrite, &dev->flags);
++
++ clear_bit(STRIPE_DEGRADED, &sh->state);
++ set_bit(STRIPE_INSYNC, &sh->state);
++ break;
++ case check_state_run:
++ break; /* we will be called again upon completion */
++ case check_state_check_result:
++ sh->check_state = check_state_idle;
++
++ /* if a failure occurred during the check operation, leave
++ * STRIPE_INSYNC not set and let the stripe be handled again
++ */
++ if (s->failed)
++ break;
++
++ /* handle a successful check operation, if parity is correct
++ * we are done. Otherwise update the mismatch count and repair
++ * parity if !MD_RECOVERY_CHECK
++ */
++ if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
++ /* parity is correct (on disc,
++ * not in buffer any more)
++ */
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
++ /* don't try to repair!! */
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ sh->check_state = check_state_compute_run;
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ set_bit(R5_Wantcompute,
++ &sh->dev[sh->pd_idx].flags);
++ sh->ops.target = sh->pd_idx;
++ sh->ops.target2 = -1;
++ s->uptodate++;
++ }
++ }
++ break;
++ case check_state_compute_run:
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
++ __func__, sh->check_state,
++ (unsigned long long) sh->sector);
++ BUG();
++ }
++}
++
++static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s,
++ int disks)
++{
++ int pd_idx = sh->pd_idx;
++ int qd_idx = sh->qd_idx;
++ struct r5dev *dev;
++
++ BUG_ON(sh->batch_head);
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ BUG_ON(s->failed > 2);
++
++ /* Want to check and possibly repair P and Q.
++ * However there could be one 'failed' device, in which
++ * case we can only check one of them, possibly using the
++ * other to generate missing data
++ */
++
++ switch (sh->check_state) {
++ case check_state_idle:
++ /* start a new check operation if there are < 2 failures */
++ if (s->failed == s->q_failed) {
++ /* The only possible failed device holds Q, so it
++			 * makes sense to check P (If anything else had failed,
++ * we would have used P to recreate it).
++ */
++ sh->check_state = check_state_run;
++ }
++ if (!s->q_failed && s->failed < 2) {
++ /* Q is not failed, and we didn't use it to generate
++ * anything, so it makes sense to check it
++ */
++ if (sh->check_state == check_state_run)
++ sh->check_state = check_state_run_pq;
++ else
++ sh->check_state = check_state_run_q;
++ }
++
++ /* discard potentially stale zero_sum_result */
++ sh->ops.zero_sum_result = 0;
++
++ if (sh->check_state == check_state_run) {
++ /* async_xor_zero_sum destroys the contents of P */
++ clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
++ s->uptodate--;
++ }
++ if (sh->check_state >= check_state_run &&
++ sh->check_state <= check_state_run_pq) {
++ /* async_syndrome_zero_sum preserves P and Q, so
++ * no need to mark them !uptodate here
++ */
++ set_bit(STRIPE_OP_CHECK, &s->ops_request);
++ break;
++ }
++
++ /* we have 2-disk failure */
++ BUG_ON(s->failed != 2);
++ /* fall through */
++ case check_state_compute_result:
++ sh->check_state = check_state_idle;
++
++ /* check that a write has not made the stripe insync */
++ if (test_bit(STRIPE_INSYNC, &sh->state))
++ break;
++
++ /* now write out any block on a failed drive,
++ * or P or Q if they were recomputed
++ */
++ BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
++ if (s->failed == 2) {
++ dev = &sh->dev[s->failed_num[1]];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ if (s->failed >= 1) {
++ dev = &sh->dev[s->failed_num[0]];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
++ dev = &sh->dev[pd_idx];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
++ dev = &sh->dev[qd_idx];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ clear_bit(STRIPE_DEGRADED, &sh->state);
++
++ set_bit(STRIPE_INSYNC, &sh->state);
++ break;
++ case check_state_run:
++ case check_state_run_q:
++ case check_state_run_pq:
++ break; /* we will be called again upon completion */
++ case check_state_check_result:
++ sh->check_state = check_state_idle;
++
++ /* handle a successful check operation, if parity is correct
++ * we are done. Otherwise update the mismatch count and repair
++ * parity if !MD_RECOVERY_CHECK
++ */
++ if (sh->ops.zero_sum_result == 0) {
++ /* both parities are correct */
++ if (!s->failed)
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ /* in contrast to the raid5 case we can validate
++ * parity, but still have a failure to write
++ * back
++ */
++ sh->check_state = check_state_compute_result;
++ /* Returning at this point means that we may go
++ * off and bring p and/or q uptodate again so
++ * we make sure to check zero_sum_result again
++ * to verify if p or q need writeback
++ */
++ }
++ } else {
++ atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
++ /* don't try to repair!! */
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ int *target = &sh->ops.target;
++
++ sh->ops.target = -1;
++ sh->ops.target2 = -1;
++ sh->check_state = check_state_compute_run;
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
++ set_bit(R5_Wantcompute,
++ &sh->dev[pd_idx].flags);
++ *target = pd_idx;
++ target = &sh->ops.target2;
++ s->uptodate++;
++ }
++ if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
++ set_bit(R5_Wantcompute,
++ &sh->dev[qd_idx].flags);
++ *target = qd_idx;
++ s->uptodate++;
++ }
++ }
++ }
++ break;
++ case check_state_compute_run:
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
++ __func__, sh->check_state,
++ (unsigned long long) sh->sector);
++ BUG();
++ }
++}
++
++static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
++{
++ int i;
++
++ /* We have read all the blocks in this stripe and now we need to
++ * copy some of them into a target stripe for expand.
++ */
++ struct dma_async_tx_descriptor *tx = NULL;
++ BUG_ON(sh->batch_head);
++ clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
++ for (i = 0; i < sh->disks; i++)
++ if (i != sh->pd_idx && i != sh->qd_idx) {
++ int dd_idx, j;
++ struct stripe_head *sh2;
++ struct async_submit_ctl submit;
++
++ sector_t bn = compute_blocknr(sh, i, 1);
++ sector_t s = raid5_compute_sector(conf, bn, 0,
++ &dd_idx, NULL);
++ sh2 = get_active_stripe(conf, s, 0, 1, 1);
++ if (sh2 == NULL)
++ /* so far only the early blocks of this stripe
++ * have been requested. When later blocks
++ * get requested, we will try again
++ */
++ continue;
++ if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
++ test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
++ /* must have already done this block */
++ release_stripe(sh2);
++ continue;
++ }
++
++ /* place all the copies on one channel */
++ init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
++ tx = async_memcpy(sh2->dev[dd_idx].page,
++ sh->dev[i].page, 0, 0, STRIPE_SIZE,
++ &submit);
++
++ set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
++ set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
++ for (j = 0; j < conf->raid_disks; j++)
++ if (j != sh2->pd_idx &&
++ j != sh2->qd_idx &&
++ !test_bit(R5_Expanded, &sh2->dev[j].flags))
++ break;
++ if (j == conf->raid_disks) {
++ set_bit(STRIPE_EXPAND_READY, &sh2->state);
++ set_bit(STRIPE_HANDLE, &sh2->state);
++ }
++ release_stripe(sh2);
++
++ }
++ /* done submitting copies, wait for them to complete */
++ async_tx_quiesce(&tx);
++}
++
++/*
++ * handle_stripe - do things to a stripe.
++ *
++ * We lock the stripe by setting STRIPE_ACTIVE and then examine the
++ * state of various bits to see what needs to be done.
++ * Possible results:
++ * return some read requests which now have data
++ * return some write requests which are safely on storage
++ * schedule a read on some buffers
++ * schedule a write of some buffers
++ * return confirmation of parity correctness
++ *
++ */
++
++static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int disks = sh->disks;
++ struct r5dev *dev;
++ int i;
++ int do_recovery = 0;
++
++ memset(s, 0, sizeof(*s));
++
++ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
++ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
++ s->failed_num[0] = -1;
++ s->failed_num[1] = -1;
++
++ /* Now to look around and see what can be done */
++ rcu_read_lock();
++ for (i=disks; i--; ) {
++ struct md_rdev *rdev;
++ sector_t first_bad;
++ int bad_sectors;
++ int is_bad = 0;
++
++ dev = &sh->dev[i];
++
++ pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
++ i, dev->flags,
++ dev->toread, dev->towrite, dev->written);
++ /* maybe we can reply to a read
++ *
++ * new wantfill requests are only permitted while
++ * ops_complete_biofill is guaranteed to be inactive
++ */
++ if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
++ !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
++ set_bit(R5_Wantfill, &dev->flags);
++
++ /* now count some things */
++ if (test_bit(R5_LOCKED, &dev->flags))
++ s->locked++;
++ if (test_bit(R5_UPTODATE, &dev->flags))
++ s->uptodate++;
++ if (test_bit(R5_Wantcompute, &dev->flags)) {
++ s->compute++;
++ BUG_ON(s->compute > 2);
++ }
++
++ if (test_bit(R5_Wantfill, &dev->flags))
++ s->to_fill++;
++ else if (dev->toread)
++ s->to_read++;
++ if (dev->towrite) {
++ s->to_write++;
++ if (!test_bit(R5_OVERWRITE, &dev->flags))
++ s->non_overwrite++;
++ }
++ if (dev->written)
++ s->written++;
++ /* Prefer to use the replacement for reads, but only
++ * if it is recovered enough and has no bad blocks.
++ */
++ rdev = rcu_dereference(conf->disks[i].replacement);
++ if (rdev && !test_bit(Faulty, &rdev->flags) &&
++ rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
++ !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
++ &first_bad, &bad_sectors))
++ set_bit(R5_ReadRepl, &dev->flags);
++ else {
++ if (rdev)
++ set_bit(R5_NeedReplace, &dev->flags);
++ rdev = rcu_dereference(conf->disks[i].rdev);
++ clear_bit(R5_ReadRepl, &dev->flags);
++ }
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = NULL;
++ if (rdev) {
++ is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
++ &first_bad, &bad_sectors);
++ if (s->blocked_rdev == NULL
++ && (test_bit(Blocked, &rdev->flags)
++ || is_bad < 0)) {
++ if (is_bad < 0)
++ set_bit(BlockedBadBlocks,
++ &rdev->flags);
++ s->blocked_rdev = rdev;
++ atomic_inc(&rdev->nr_pending);
++ }
++ }
++ clear_bit(R5_Insync, &dev->flags);
++ if (!rdev)
++ /* Not in-sync */;
++ else if (is_bad) {
++ /* also not in-sync */
++ if (!test_bit(WriteErrorSeen, &rdev->flags) &&
++ test_bit(R5_UPTODATE, &dev->flags)) {
++ /* treat as in-sync, but with a read error
++ * which we can now try to correct
++ */
++ set_bit(R5_Insync, &dev->flags);
++ set_bit(R5_ReadError, &dev->flags);
++ }
++ } else if (test_bit(In_sync, &rdev->flags))
++ set_bit(R5_Insync, &dev->flags);
++ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
++ /* in sync if before recovery_offset */
++ set_bit(R5_Insync, &dev->flags);
++ else if (test_bit(R5_UPTODATE, &dev->flags) &&
++ test_bit(R5_Expanded, &dev->flags))
++ /* If we've reshaped into here, we assume it is Insync.
++ * We will shortly update recovery_offset to make
++ * it official.
++ */
++ set_bit(R5_Insync, &dev->flags);
++
++ if (test_bit(R5_WriteError, &dev->flags)) {
++ /* This flag does not apply to '.replacement'
++			 * only to '.rdev', so make sure to check that */
++ struct md_rdev *rdev2 = rcu_dereference(
++ conf->disks[i].rdev);
++ if (rdev2 == rdev)
++ clear_bit(R5_Insync, &dev->flags);
++ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
++ s->handle_bad_blocks = 1;
++ atomic_inc(&rdev2->nr_pending);
++ } else
++ clear_bit(R5_WriteError, &dev->flags);
++ }
++ if (test_bit(R5_MadeGood, &dev->flags)) {
++ /* This flag does not apply to '.replacement'
++			 * only to '.rdev', so make sure to check that */
++ struct md_rdev *rdev2 = rcu_dereference(
++ conf->disks[i].rdev);
++ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
++ s->handle_bad_blocks = 1;
++ atomic_inc(&rdev2->nr_pending);
++ } else
++ clear_bit(R5_MadeGood, &dev->flags);
++ }
++ if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
++ struct md_rdev *rdev2 = rcu_dereference(
++ conf->disks[i].replacement);
++ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
++ s->handle_bad_blocks = 1;
++ atomic_inc(&rdev2->nr_pending);
++ } else
++ clear_bit(R5_MadeGoodRepl, &dev->flags);
++ }
++ if (!test_bit(R5_Insync, &dev->flags)) {
++ /* The ReadError flag will just be confusing now */
++ clear_bit(R5_ReadError, &dev->flags);
++ clear_bit(R5_ReWrite, &dev->flags);
++ }
++ if (test_bit(R5_ReadError, &dev->flags))
++ clear_bit(R5_Insync, &dev->flags);
++ if (!test_bit(R5_Insync, &dev->flags)) {
++ if (s->failed < 2)
++ s->failed_num[s->failed] = i;
++ s->failed++;
++ if (rdev && !test_bit(Faulty, &rdev->flags))
++ do_recovery = 1;
++ }
++ }
++ if (test_bit(STRIPE_SYNCING, &sh->state)) {
++ /* If there is a failed device being replaced,
++ * we must be recovering.
++ * else if we are after recovery_cp, we must be syncing
++		 * else if MD_RECOVERY_REQUESTED is set, we are also syncing.
++ * else we can only be replacing
++ * sync and recovery both need to read all devices, and so
++ * use the same flag.
++ */
++ if (do_recovery ||
++ sh->sector >= conf->mddev->recovery_cp ||
++ test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
++ s->syncing = 1;
++ else
++ s->replacing = 1;
++ }
++ rcu_read_unlock();
++}
++
++static int clear_batch_ready(struct stripe_head *sh)
++{
++	/* Return '1' if this is a member of a batch, or
++ * '0' if it is a lone stripe or a head which can now be
++ * handled.
++ */
++ struct stripe_head *tmp;
++ if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
++ return (sh->batch_head && sh->batch_head != sh);
++ spin_lock(&sh->stripe_lock);
++ if (!sh->batch_head) {
++ spin_unlock(&sh->stripe_lock);
++ return 0;
++ }
++
++ /*
++ * this stripe could be added to a batch list before we check
++	 * BATCH_READY, so skip it
++ */
++ if (sh->batch_head != sh) {
++ spin_unlock(&sh->stripe_lock);
++ return 1;
++ }
++ spin_lock(&sh->batch_lock);
++ list_for_each_entry(tmp, &sh->batch_list, batch_list)
++ clear_bit(STRIPE_BATCH_READY, &tmp->state);
++ spin_unlock(&sh->batch_lock);
++ spin_unlock(&sh->stripe_lock);
++
++ /*
++ * BATCH_READY is cleared, no new stripes can be added.
++ * batch_list can be accessed without lock
++ */
++ return 0;
++}
++
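++/*
++ * break_stripe_batch_list - detach every stripe from the batch headed by
++ * head_sh, copy the relevant state (check/reconstruct state, INSYNC,
++ * device flags) from the head, and mark members for handling as
++ * handle_flags allows.
++ */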
++static void break_stripe_batch_list(struct stripe_head *head_sh,
++ unsigned long handle_flags)
++{
++ struct stripe_head *sh, *next;
++ int i;
++ int do_wakeup = 0;
++
++ list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
++
++ list_del_init(&sh->batch_list);
++
++ WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
++ (1 << STRIPE_SYNCING) |
++ (1 << STRIPE_REPLACED) |
++ (1 << STRIPE_PREREAD_ACTIVE) |
++ (1 << STRIPE_DELAYED) |
++ (1 << STRIPE_BIT_DELAY) |
++ (1 << STRIPE_FULL_WRITE) |
++ (1 << STRIPE_BIOFILL_RUN) |
++ (1 << STRIPE_COMPUTE_RUN) |
++ (1 << STRIPE_OPS_REQ_PENDING) |
++ (1 << STRIPE_DISCARD) |
++ (1 << STRIPE_BATCH_READY) |
++ (1 << STRIPE_BATCH_ERR) |
++ (1 << STRIPE_BITMAP_PENDING)));
++ WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
++ (1 << STRIPE_REPLACED)));
++
++ set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
++ (1 << STRIPE_DEGRADED)),
++ head_sh->state & (1 << STRIPE_INSYNC));
++
++ sh->check_state = head_sh->check_state;
++ sh->reconstruct_state = head_sh->reconstruct_state;
++ for (i = 0; i < sh->disks; i++) {
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
++ do_wakeup = 1;
++ sh->dev[i].flags = head_sh->dev[i].flags &
++ (~((1 << R5_WriteError) | (1 << R5_Overlap)));
++ }
++ spin_lock_irq(&sh->stripe_lock);
++ sh->batch_head = NULL;
++ spin_unlock_irq(&sh->stripe_lock);
++ if (handle_flags == 0 ||
++ sh->state & handle_flags)
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++ }
++ spin_lock_irq(&head_sh->stripe_lock);
++ head_sh->batch_head = NULL;
++ spin_unlock_irq(&head_sh->stripe_lock);
++ for (i = 0; i < head_sh->disks; i++)
++ if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
++ do_wakeup = 1;
++ if (head_sh->state & handle_flags)
++ set_bit(STRIPE_HANDLE, &head_sh->state);
++
++ if (do_wakeup)
++ wake_up(&head_sh->raid_conf->wait_for_overlap);
++}
++
++static void handle_stripe(struct stripe_head *sh)
++{
++ struct stripe_head_state s;
++ struct r5conf *conf = sh->raid_conf;
++ int i;
++ int prexor;
++ int disks = sh->disks;
++ struct r5dev *pdev, *qdev;
++
++ clear_bit(STRIPE_HANDLE, &sh->state);
++ if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
++ /* already being handled, ensure it gets handled
++ * again when current action finishes */
++ set_bit(STRIPE_HANDLE, &sh->state);
++ return;
++ }
++
++	if (clear_batch_ready(sh)) {
++ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
++ return;
++ }
++
++ if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
++ break_stripe_batch_list(sh, 0);
++
++ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
++ spin_lock(&sh->stripe_lock);
++ /* Cannot process 'sync' concurrently with 'discard' */
++ if (!test_bit(STRIPE_DISCARD, &sh->state) &&
++ test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
++ set_bit(STRIPE_SYNCING, &sh->state);
++ clear_bit(STRIPE_INSYNC, &sh->state);
++ clear_bit(STRIPE_REPLACED, &sh->state);
++ }
++ spin_unlock(&sh->stripe_lock);
++ }
++ clear_bit(STRIPE_DELAYED, &sh->state);
++
++ pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
++ "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
++ (unsigned long long)sh->sector, sh->state,
++ atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
++ sh->check_state, sh->reconstruct_state);
++
++ analyse_stripe(sh, &s);
++
++ if (s.handle_bad_blocks) {
++ set_bit(STRIPE_HANDLE, &sh->state);
++ goto finish;
++ }
++
++ if (unlikely(s.blocked_rdev)) {
++ if (s.syncing || s.expanding || s.expanded ||
++ s.replacing || s.to_write || s.written) {
++ set_bit(STRIPE_HANDLE, &sh->state);
++ goto finish;
++ }
++ /* There is nothing for the blocked_rdev to block */
++ rdev_dec_pending(s.blocked_rdev, conf->mddev);
++ s.blocked_rdev = NULL;
++ }
++
++ if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
++ set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
++ set_bit(STRIPE_BIOFILL_RUN, &sh->state);
++ }
++
++ pr_debug("locked=%d uptodate=%d to_read=%d"
++ " to_write=%d failed=%d failed_num=%d,%d\n",
++ s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
++ s.failed_num[0], s.failed_num[1]);
++ /* check if the array has lost more than max_degraded devices and,
++ * if so, some requests might need to be failed.
++ */
++ if (s.failed > conf->max_degraded) {
++ sh->check_state = 0;
++ sh->reconstruct_state = 0;
++ break_stripe_batch_list(sh, 0);
++ if (s.to_read+s.to_write+s.written)
++ handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
++ if (s.syncing + s.replacing)
++ handle_failed_sync(conf, sh, &s);
++ }
++
++ /* Now we check to see if any write operations have recently
++ * completed
++ */
++ prexor = 0;
++ if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
++ prexor = 1;
++ if (sh->reconstruct_state == reconstruct_state_drain_result ||
++ sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
++ sh->reconstruct_state = reconstruct_state_idle;
++
++ /* All the 'written' buffers and the parity block are ready to
++ * be written back to disk
++ */
++ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
++ !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
++ BUG_ON(sh->qd_idx >= 0 &&
++ !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
++ !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (test_bit(R5_LOCKED, &dev->flags) &&
++ (i == sh->pd_idx || i == sh->qd_idx ||
++ dev->written)) {
++ pr_debug("Writing block %d\n", i);
++ set_bit(R5_Wantwrite, &dev->flags);
++ if (prexor)
++ continue;
++ if (s.failed > 1)
++ continue;
++ if (!test_bit(R5_Insync, &dev->flags) ||
++ ((i == sh->pd_idx || i == sh->qd_idx) &&
++ s.failed == 0))
++ set_bit(STRIPE_INSYNC, &sh->state);
++ }
++ }
++ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ s.dec_preread_active = 1;
++ }
++
++ /*
++ * might be able to return some write requests if the parity blocks
++ * are safe, or on a failed drive
++ */
++ pdev = &sh->dev[sh->pd_idx];
++ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
++ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
++ qdev = &sh->dev[sh->qd_idx];
++ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
++ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
++ || conf->level < 6;
++
++ if (s.written &&
++ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
++ && !test_bit(R5_LOCKED, &pdev->flags)
++ && (test_bit(R5_UPTODATE, &pdev->flags) ||
++ test_bit(R5_Discard, &pdev->flags))))) &&
++ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
++ && !test_bit(R5_LOCKED, &qdev->flags)
++ && (test_bit(R5_UPTODATE, &qdev->flags) ||
++ test_bit(R5_Discard, &qdev->flags))))))
++ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
++
++ /* Now we might consider reading some blocks, either to check/generate
++ * parity, or to satisfy requests
++ * or to load a block that is being partially written.
++ */
++ if (s.to_read || s.non_overwrite
++ || (conf->level == 6 && s.to_write && s.failed)
++ || (s.syncing && (s.uptodate + s.compute < disks))
++ || s.replacing
++ || s.expanding)
++ handle_stripe_fill(sh, &s, disks);
++
++	/* Now to consider new write requests and what else, if anything,
++ * should be read. We do not handle new writes when:
++ * 1/ A 'write' operation (copy+xor) is already in flight.
++ * 2/ A 'check' operation is in flight, as it may clobber the parity
++ * block.
++ */
++ if (s.to_write && !sh->reconstruct_state && !sh->check_state)
++ handle_stripe_dirtying(conf, sh, &s, disks);
++
++ /* maybe we need to check and possibly fix the parity for this stripe
++ * Any reads will already have been scheduled, so we just see if enough
++ * data is available. The parity check is held off while parity
++ * dependent operations are in flight.
++ */
++ if (sh->check_state ||
++ (s.syncing && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
++ !test_bit(STRIPE_INSYNC, &sh->state))) {
++ if (conf->level == 6)
++ handle_parity_checks6(conf, sh, &s, disks);
++ else
++ handle_parity_checks5(conf, sh, &s, disks);
++ }
++
++ if ((s.replacing || s.syncing) && s.locked == 0
++ && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
++ && !test_bit(STRIPE_REPLACED, &sh->state)) {
++ /* Write out to replacement devices where possible */
++ for (i = 0; i < conf->raid_disks; i++)
++ if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
++ WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ set_bit(R5_WantReplace, &sh->dev[i].flags);
++ set_bit(R5_LOCKED, &sh->dev[i].flags);
++ s.locked++;
++ }
++ if (s.replacing)
++ set_bit(STRIPE_INSYNC, &sh->state);
++ set_bit(STRIPE_REPLACED, &sh->state);
++ }
++ if ((s.syncing || s.replacing) && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
++ test_bit(STRIPE_INSYNC, &sh->state)) {
++ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
++ clear_bit(STRIPE_SYNCING, &sh->state);
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
++ wake_up(&conf->wait_for_overlap);
++ }
++
++ /* If the failed drives are just a ReadError, then we might need
++ * to progress the repair/check process
++ */
++ if (s.failed <= conf->max_degraded && !conf->mddev->ro)
++ for (i = 0; i < s.failed; i++) {
++ struct r5dev *dev = &sh->dev[s.failed_num[i]];
++ if (test_bit(R5_ReadError, &dev->flags)
++ && !test_bit(R5_LOCKED, &dev->flags)
++ && test_bit(R5_UPTODATE, &dev->flags)
++ ) {
++ if (!test_bit(R5_ReWrite, &dev->flags)) {
++ set_bit(R5_Wantwrite, &dev->flags);
++ set_bit(R5_ReWrite, &dev->flags);
++ set_bit(R5_LOCKED, &dev->flags);
++ s.locked++;
++ } else {
++ /* let's read it back */
++ set_bit(R5_Wantread, &dev->flags);
++ set_bit(R5_LOCKED, &dev->flags);
++ s.locked++;
++ }
++ }
++ }
++
++ /* Finish reconstruct operations initiated by the expansion process */
++ if (sh->reconstruct_state == reconstruct_state_result) {
++ struct stripe_head *sh_src
++ = get_active_stripe(conf, sh->sector, 1, 1, 1);
++ if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
++ /* sh cannot be written until sh_src has been read.
++ * so arrange for sh to be delayed a little
++ */
++ set_bit(STRIPE_DELAYED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
++ &sh_src->state))
++ atomic_inc(&conf->preread_active_stripes);
++ release_stripe(sh_src);
++ goto finish;
++ }
++ if (sh_src)
++ release_stripe(sh_src);
++
++ sh->reconstruct_state = reconstruct_state_idle;
++ clear_bit(STRIPE_EXPANDING, &sh->state);
++ for (i = conf->raid_disks; i--; ) {
++ set_bit(R5_Wantwrite, &sh->dev[i].flags);
++ set_bit(R5_LOCKED, &sh->dev[i].flags);
++ s.locked++;
++ }
++ }
++
++ if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
++ !sh->reconstruct_state) {
++ /* Need to write out all blocks after computing parity */
++ sh->disks = conf->raid_disks;
++ stripe_set_idx(sh->sector, conf, 0, sh);
++ schedule_reconstruction(sh, &s, 1, 1);
++ } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
++ clear_bit(STRIPE_EXPAND_READY, &sh->state);
++ atomic_dec(&conf->reshape_stripes);
++ wake_up(&conf->wait_for_overlap);
++ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
++ }
++
++ if (s.expanding && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
++ handle_stripe_expansion(conf, sh);
++
++finish:
++ /* wait for this device to become unblocked */
++ if (unlikely(s.blocked_rdev)) {
++ if (conf->mddev->external)
++ md_wait_for_blocked_rdev(s.blocked_rdev,
++ conf->mddev);
++ else
++ /* Internal metadata will immediately
++ * be written by raid5d, so we don't
++ * need to wait here.
++ */
++ rdev_dec_pending(s.blocked_rdev,
++ conf->mddev);
++ }
++
++ if (s.handle_bad_blocks)
++ for (i = disks; i--; ) {
++ struct md_rdev *rdev;
++ struct r5dev *dev = &sh->dev[i];
++ if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
++ /* We own a safe reference to the rdev */
++ rdev = conf->disks[i].rdev;
++ if (!rdev_set_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0))
++ md_error(conf->mddev, rdev);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
++ rdev = conf->disks[i].rdev;
++ rdev_clear_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
++ rdev = conf->disks[i].replacement;
++ if (!rdev)
++				/* rdev has been moved down */
++ rdev = conf->disks[i].rdev;
++ rdev_clear_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ }
++
++ if (s.ops_request)
++ raid_run_ops(sh, s.ops_request);
++
++ ops_run_io(sh, &s);
++
++ if (s.dec_preread_active) {
++ /* We delay this until after ops_run_io so that if make_request
++ * is waiting on a flush, it won't continue until the writes
++ * have actually been submitted.
++ */
++ atomic_dec(&conf->preread_active_stripes);
++ if (atomic_read(&conf->preread_active_stripes) <
++ IO_THRESHOLD)
++ md_wakeup_thread(conf->mddev->thread);
++ }
++
++ return_io(s.return_bi);
++
++ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
++}
++
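++/*
++ * raid5_activate_delayed - once preread activity drops below IO_THRESHOLD,
++ * move stripes that were delayed (waiting for more writes to coalesce)
++ * onto the hold list and wake a stripe worker for them.
++ */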
++static void raid5_activate_delayed(struct r5conf *conf)
++{
++ if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
++ while (!list_empty(&conf->delayed_list)) {
++ struct list_head *l = conf->delayed_list.next;
++ struct stripe_head *sh;
++ sh = list_entry(l, struct stripe_head, lru);
++ list_del_init(l);
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ atomic_inc(&conf->preread_active_stripes);
++ list_add_tail(&sh->lru, &conf->hold_list);
++ raid5_wakeup_stripe_thread(sh);
++ }
++ }
++}
++
++static void activate_bit_delay(struct r5conf *conf,
++ struct list_head *temp_inactive_list)
++{
++ /* device_lock is held */
++ struct list_head head;
++ list_add(&head, &conf->bitmap_list);
++ list_del_init(&conf->bitmap_list);
++ while (!list_empty(&head)) {
++ struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
++ int hash;
++ list_del_init(&sh->lru);
++ atomic_inc(&sh->count);
++ hash = sh->hash_lock_index;
++ __release_stripe(conf, sh, &temp_inactive_list[hash]);
++ }
++}
++
++static int raid5_congested(struct mddev *mddev, int bits)
++{
++ struct r5conf *conf = mddev->private;
++
++ /* No difference between reads and writes. Just check
++ * how busy the stripe_cache is
++ */
++
++ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
++ return 1;
++ if (conf->quiesce)
++ return 1;
++ if (atomic_read(&conf->empty_inactive_list_nr))
++ return 1;
++
++ return 0;
++}
++
++/* We want read requests to align with chunks where possible,
++ * but write requests don't need to.
++ */
++static int raid5_mergeable_bvec(struct mddev *mddev,
++ struct bvec_merge_data *bvm,
++ struct bio_vec *biovec)
++{
++ sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
++ int max;
++ unsigned int chunk_sectors = mddev->chunk_sectors;
++ unsigned int bio_sectors = bvm->bi_size >> 9;
++
++ /*
++ * always allow writes to be mergeable, read as well if array
++ * is degraded as we'll go through stripe cache anyway.
++ */
++ if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
++ return biovec->bv_len;
++
++ if (mddev->new_chunk_sectors < mddev->chunk_sectors)
++ chunk_sectors = mddev->new_chunk_sectors;
++ max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
++ if (max < 0) max = 0;
++ if (max <= biovec->bv_len && bio_sectors == 0)
++ return biovec->bv_len;
++ else
++ return max;
++}
++
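++/*
++ * in_chunk_boundary - return true if the bio lies entirely inside one
++ * chunk, i.e. it touches a single data device and can be considered for
++ * a chunk-aligned read.
++ */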
++static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
++{
++ sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
++ unsigned int chunk_sectors = mddev->chunk_sectors;
++ unsigned int bio_sectors = bio_sectors(bio);
++
++ if (mddev->new_chunk_sectors < mddev->chunk_sectors)
++ chunk_sectors = mddev->new_chunk_sectors;
++ return chunk_sectors >=
++ ((sector & (chunk_sectors - 1)) + bio_sectors);
++}
++
++/*
++ * add bio to the retry LIFO (in O(1) ... we are in interrupt context),
++ * later sampled by raid5d.
++ */
++static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&conf->device_lock, flags);
++
++ bi->bi_next = conf->retry_read_aligned_list;
++ conf->retry_read_aligned_list = bi;
++
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ md_wakeup_thread(conf->mddev->thread);
++}
++
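++/*
++ * remove_bio_from_retry - return the next aligned read to retry: either
++ * the partially processed bio left over from the previous pass, or the
++ * head of the retry list with its active stripe count reinitialised.
++ */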
++static struct bio *remove_bio_from_retry(struct r5conf *conf)
++{
++ struct bio *bi;
++
++ bi = conf->retry_read_aligned;
++ if (bi) {
++ conf->retry_read_aligned = NULL;
++ return bi;
++ }
++ bi = conf->retry_read_aligned_list;
++	if (bi) {
++ conf->retry_read_aligned_list = bi->bi_next;
++ bi->bi_next = NULL;
++ /*
++		 * this sets the active stripe count to 1 and the processed
++		 * stripe count to zero (upper 8 bits)
++ */
++ raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
++ }
++
++ return bi;
++}
++
++/*
++ * The "raid5_align_endio" should check if the read succeeded and if it
++ * did, call bio_endio on the original bio (having bio_put the new bio
++ * first).
++ * If the read failed, queue the original bio for a retry via raid5d.
++ */
++static void raid5_align_endio(struct bio *bi, int error)
++{
++ struct bio* raid_bi = bi->bi_private;
++ struct mddev *mddev;
++ struct r5conf *conf;
++ int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
++ struct md_rdev *rdev;
++
++ bio_put(bi);
++
++ rdev = (void*)raid_bi->bi_next;
++ raid_bi->bi_next = NULL;
++ mddev = rdev->mddev;
++ conf = mddev->private;
++
++ rdev_dec_pending(rdev, conf->mddev);
++
++ if (!error && uptodate) {
++ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
++ raid_bi, 0);
++ bio_endio(raid_bi, 0);
++ if (atomic_dec_and_test(&conf->active_aligned_reads))
++ wake_up(&conf->wait_for_stripe);
++ return;
++ }
++
++ pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
++
++ add_bio_to_retry(raid_bi, conf);
++}
++
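++/*
++ * bio_fits_rdev - check that the cloned bio is acceptable to the member
++ * device's queue (max sectors, max segments, no merge_bvec_fn) so that
++ * it can be sent down directly.
++ */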
++static int bio_fits_rdev(struct bio *bi)
++{
++ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
++
++ if (bio_sectors(bi) > queue_max_sectors(q))
++ return 0;
++ blk_recount_segments(q, bi);
++ if (bi->bi_phys_segments > queue_max_segments(q))
++ return 0;
++
++ if (q->merge_bvec_fn)
++ /* it's too hard to apply the merge_bvec_fn at this stage,
++		 * just give up
++ */
++ return 0;
++
++ return 1;
++}
++
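++/*
++ * chunk_aligned_read - try to service an aligned read by cloning the bio
++ * and submitting it straight to one member device, bypassing the stripe
++ * cache.  Returns 1 if the clone was submitted, 0 if the caller must use
++ * the normal stripe path.
++ */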
++static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
++{
++ struct r5conf *conf = mddev->private;
++ int dd_idx;
++ struct bio* align_bi;
++ struct md_rdev *rdev;
++ sector_t end_sector;
++
++ if (!in_chunk_boundary(mddev, raid_bio)) {
++ pr_debug("chunk_aligned_read : non aligned\n");
++ return 0;
++ }
++ /*
++ * use bio_clone_mddev to make a copy of the bio
++ */
++ align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
++ if (!align_bi)
++ return 0;
++ /*
++ * set bi_end_io to a new function, and set bi_private to the
++ * original bio.
++ */
++ align_bi->bi_end_io = raid5_align_endio;
++ align_bi->bi_private = raid_bio;
++ /*
++ * compute position
++ */
++ align_bi->bi_iter.bi_sector =
++ raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
++ 0, &dd_idx, NULL);
++
++ end_sector = bio_end_sector(align_bi);
++ rcu_read_lock();
++ rdev = rcu_dereference(conf->disks[dd_idx].replacement);
++ if (!rdev || test_bit(Faulty, &rdev->flags) ||
++ rdev->recovery_offset < end_sector) {
++ rdev = rcu_dereference(conf->disks[dd_idx].rdev);
++ if (rdev &&
++ (test_bit(Faulty, &rdev->flags) ||
++ !(test_bit(In_sync, &rdev->flags) ||
++ rdev->recovery_offset >= end_sector)))
++ rdev = NULL;
++ }
++ if (rdev) {
++ sector_t first_bad;
++ int bad_sectors;
++
++ atomic_inc(&rdev->nr_pending);
++ rcu_read_unlock();
++ raid_bio->bi_next = (void*)rdev;
++ align_bi->bi_bdev = rdev->bdev;
++ __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
++
++ if (!bio_fits_rdev(align_bi) ||
++ is_badblock(rdev, align_bi->bi_iter.bi_sector,
++ bio_sectors(align_bi),
++ &first_bad, &bad_sectors)) {
++ /* too big in some way, or has a known bad block */
++ bio_put(align_bi);
++ rdev_dec_pending(rdev, mddev);
++ return 0;
++ }
++
++ /* No reshape active, so we can trust rdev->data_offset */
++ align_bi->bi_iter.bi_sector += rdev->data_offset;
++
++ spin_lock_irq(&conf->device_lock);
++ wait_event_lock_irq(conf->wait_for_stripe,
++ conf->quiesce == 0,
++ conf->device_lock);
++ atomic_inc(&conf->active_aligned_reads);
++ spin_unlock_irq(&conf->device_lock);
++
++ if (mddev->gendisk)
++ trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
++ align_bi, disk_devt(mddev->gendisk),
++ raid_bio->bi_iter.bi_sector);
++ generic_make_request(align_bi);
++ return 1;
++ } else {
++ rcu_read_unlock();
++ bio_put(align_bi);
++ return 0;
++ }
++}
++
++/* __get_priority_stripe - get the next stripe to process
++ *
++ * Full stripe writes are allowed to pass preread active stripes up until
++ * the bypass_threshold is exceeded. In general the bypass_count
++ * increments when the handle_list is handled before the hold_list; however, it
++ * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
++ * stripe with in flight i/o. The bypass_count will be reset when the
++ * head of the hold_list has changed, i.e. the head was promoted to the
++ * handle_list.
++ */
++static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
++{
++ struct stripe_head *sh = NULL, *tmp;
++ struct list_head *handle_list = NULL;
++ struct r5worker_group *wg = NULL;
++
++ if (conf->worker_cnt_per_group == 0) {
++ handle_list = &conf->handle_list;
++ } else if (group != ANY_GROUP) {
++ handle_list = &conf->worker_groups[group].handle_list;
++ wg = &conf->worker_groups[group];
++ } else {
++ int i;
++ for (i = 0; i < conf->group_cnt; i++) {
++ handle_list = &conf->worker_groups[i].handle_list;
++ wg = &conf->worker_groups[i];
++ if (!list_empty(handle_list))
++ break;
++ }
++ }
++
++ pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
++ __func__,
++ list_empty(handle_list) ? "empty" : "busy",
++ list_empty(&conf->hold_list) ? "empty" : "busy",
++ atomic_read(&conf->pending_full_writes), conf->bypass_count);
++
++ if (!list_empty(handle_list)) {
++ sh = list_entry(handle_list->next, typeof(*sh), lru);
++
++ if (list_empty(&conf->hold_list))
++ conf->bypass_count = 0;
++ else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
++ if (conf->hold_list.next == conf->last_hold)
++ conf->bypass_count++;
++ else {
++ conf->last_hold = conf->hold_list.next;
++ conf->bypass_count -= conf->bypass_threshold;
++ if (conf->bypass_count < 0)
++ conf->bypass_count = 0;
++ }
++ }
++ } else if (!list_empty(&conf->hold_list) &&
++ ((conf->bypass_threshold &&
++ conf->bypass_count > conf->bypass_threshold) ||
++ atomic_read(&conf->pending_full_writes) == 0)) {
++
++ list_for_each_entry(tmp, &conf->hold_list, lru) {
++ if (conf->worker_cnt_per_group == 0 ||
++ group == ANY_GROUP ||
++ !cpu_online(tmp->cpu) ||
++ cpu_to_group(tmp->cpu) == group) {
++ sh = tmp;
++ break;
++ }
++ }
++
++ if (sh) {
++ conf->bypass_count -= conf->bypass_threshold;
++ if (conf->bypass_count < 0)
++ conf->bypass_count = 0;
++ }
++ wg = NULL;
++ }
++
++ if (!sh)
++ return NULL;
++
++ if (wg) {
++ wg->stripes_cnt--;
++ sh->group = NULL;
++ }
++ list_del_init(&sh->lru);
++ BUG_ON(atomic_inc_return(&sh->count) != 1);
++ return sh;
++}
++
++struct raid5_plug_cb {
++ struct blk_plug_cb cb;
++ struct list_head list;
++ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
++};
++
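++/*
++ * raid5_unplug - blk plug callback: release the stripes collected on the
++ * plug's private list back to the stripe handling machinery in one batch
++ * under device_lock.
++ */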
++static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
++{
++ struct raid5_plug_cb *cb = container_of(
++ blk_cb, struct raid5_plug_cb, cb);
++ struct stripe_head *sh;
++ struct mddev *mddev = cb->cb.data;
++ struct r5conf *conf = mddev->private;
++ int cnt = 0;
++ int hash;
++
++ if (cb->list.next && !list_empty(&cb->list)) {
++ spin_lock_irq(&conf->device_lock);
++ while (!list_empty(&cb->list)) {
++ sh = list_first_entry(&cb->list, struct stripe_head, lru);
++ list_del_init(&sh->lru);
++ /*
++			/*
++			 * avoid the race where release_stripe_plug() sees
++ * STRIPE_ON_UNPLUG_LIST clear but the stripe
++ * is still in our list
++ */
++ smp_mb__before_atomic();
++ clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
++ /*
++ * STRIPE_ON_RELEASE_LIST could be set here. In that
++ * case, the count is always > 1 here
++ */
++ hash = sh->hash_lock_index;
++ __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
++ cnt++;
++ }
++ spin_unlock_irq(&conf->device_lock);
++ }
++ release_inactive_stripe_list(conf, cb->temp_inactive_list,
++ NR_STRIPE_HASH_LOCKS);
++ if (mddev->queue)
++ trace_block_unplug(mddev->queue, cnt, !from_schedule);
++ kfree(cb);
++}
++
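++/*
++ * release_stripe_plug - queue the stripe on the current blk plug (if any)
++ * so that several stripes can be released together at unplug time; fall
++ * back to an immediate release_stripe() when no plug is active.
++ */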
++static void release_stripe_plug(struct mddev *mddev,
++ struct stripe_head *sh)
++{
++ struct blk_plug_cb *blk_cb = blk_check_plugged(
++ raid5_unplug, mddev,
++ sizeof(struct raid5_plug_cb));
++ struct raid5_plug_cb *cb;
++
++ if (!blk_cb) {
++ release_stripe(sh);
++ return;
++ }
++
++ cb = container_of(blk_cb, struct raid5_plug_cb, cb);
++
++ if (cb->list.next == NULL) {
++ int i;
++ INIT_LIST_HEAD(&cb->list);
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ INIT_LIST_HEAD(cb->temp_inactive_list + i);
++ }
++
++ if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
++ list_add_tail(&sh->lru, &cb->list);
++ else
++ release_stripe(sh);
++}
++
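++/*
++ * make_discard_request - handle a REQ_DISCARD bio: clamp it to whole
++ * stripes, attach it as a full overwrite to every data block of each
++ * affected stripe, and let handle_stripe() turn that into per-device
++ * discards.
++ */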
++static void make_discard_request(struct mddev *mddev, struct bio *bi)
++{
++ struct r5conf *conf = mddev->private;
++ sector_t logical_sector, last_sector;
++ struct stripe_head *sh;
++ int remaining;
++ int stripe_sectors;
++
++ if (mddev->reshape_position != MaxSector)
++ /* Skip discard while reshape is happening */
++ return;
++
++ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
++ last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
++
++ bi->bi_next = NULL;
++ bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
++
++ stripe_sectors = conf->chunk_sectors *
++ (conf->raid_disks - conf->max_degraded);
++ logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
++ stripe_sectors);
++ sector_div(last_sector, stripe_sectors);
++
++ logical_sector *= conf->chunk_sectors;
++ last_sector *= conf->chunk_sectors;
++
++ for (; logical_sector < last_sector;
++ logical_sector += STRIPE_SECTORS) {
++ DEFINE_WAIT(w);
++ int d;
++ again:
++ sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
++ prepare_to_wait(&conf->wait_for_overlap, &w,
++ TASK_UNINTERRUPTIBLE);
++ set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
++ if (test_bit(STRIPE_SYNCING, &sh->state)) {
++ release_stripe(sh);
++ schedule();
++ goto again;
++ }
++ clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
++ spin_lock_irq(&sh->stripe_lock);
++ for (d = 0; d < conf->raid_disks; d++) {
++ if (d == sh->pd_idx || d == sh->qd_idx)
++ continue;
++ if (sh->dev[d].towrite || sh->dev[d].toread) {
++ set_bit(R5_Overlap, &sh->dev[d].flags);
++ spin_unlock_irq(&sh->stripe_lock);
++ release_stripe(sh);
++ schedule();
++ goto again;
++ }
++ }
++ set_bit(STRIPE_DISCARD, &sh->state);
++ finish_wait(&conf->wait_for_overlap, &w);
++ sh->overwrite_disks = 0;
++ for (d = 0; d < conf->raid_disks; d++) {
++ if (d == sh->pd_idx || d == sh->qd_idx)
++ continue;
++ sh->dev[d].towrite = bi;
++ set_bit(R5_OVERWRITE, &sh->dev[d].flags);
++ raid5_inc_bi_active_stripes(bi);
++ sh->overwrite_disks++;
++ }
++ spin_unlock_irq(&sh->stripe_lock);
++ if (conf->mddev->bitmap) {
++ for (d = 0;
++ d < conf->raid_disks - conf->max_degraded;
++ d++)
++ bitmap_startwrite(mddev->bitmap,
++ sh->sector,
++ STRIPE_SECTORS,
++ 0);
++ sh->bm_seq = conf->seq_flush + 1;
++ set_bit(STRIPE_BIT_DELAY, &sh->state);
++ }
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ atomic_inc(&conf->preread_active_stripes);
++ release_stripe_plug(mddev, sh);
++ }
++
++ remaining = raid5_dec_bi_active_stripes(bi);
++ if (remaining == 0) {
++ md_write_end(mddev);
++ bio_endio(bi, 0);
++ }
++}
++
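++/*
++ * make_request - main I/O entry point: try the chunk-aligned read fast
++ * path first, otherwise split the bio into STRIPE_SECTORS-sized pieces,
++ * attach each piece to its stripe_head (retrying across reshape and
++ * suspend windows) and schedule the stripes for handling.
++ */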
++static void make_request(struct mddev *mddev, struct bio * bi)
++{
++ struct r5conf *conf = mddev->private;
++ int dd_idx;
++ sector_t new_sector;
++ sector_t logical_sector, last_sector;
++ struct stripe_head *sh;
++ const int rw = bio_data_dir(bi);
++ int remaining;
++ DEFINE_WAIT(w);
++ bool do_prepare;
++
++ if (unlikely(bi->bi_rw & REQ_FLUSH)) {
++ md_flush_request(mddev, bi);
++ return;
++ }
++
++ md_write_start(mddev, bi);
++
++ /*
++ * If array is degraded, better not do chunk aligned read because
++ * later we might have to read it again in order to reconstruct
++ * data on failed drives.
++ */
++ if (rw == READ && mddev->degraded == 0 &&
++ mddev->reshape_position == MaxSector &&
++ chunk_aligned_read(mddev,bi))
++ return;
++
++ if (unlikely(bi->bi_rw & REQ_DISCARD)) {
++ make_discard_request(mddev, bi);
++ return;
++ }
++
++ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
++ last_sector = bio_end_sector(bi);
++ bi->bi_next = NULL;
++ bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
++
++ prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
++ for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
++ int previous;
++ int seq;
++
++ do_prepare = false;
++ retry:
++ seq = read_seqcount_begin(&conf->gen_lock);
++ previous = 0;
++ if (do_prepare)
++ prepare_to_wait(&conf->wait_for_overlap, &w,
++ TASK_UNINTERRUPTIBLE);
++ if (unlikely(conf->reshape_progress != MaxSector)) {
++ /* spinlock is needed as reshape_progress may be
++ * 64bit on a 32bit platform, and so it might be
++			 * possible to see a half-updated value.
++ * Of course reshape_progress could change after
++ * the lock is dropped, so once we get a reference
++ * to the stripe that we think it is, we will have
++ * to check again.
++ */
++ spin_lock_irq(&conf->device_lock);
++ if (mddev->reshape_backwards
++ ? logical_sector < conf->reshape_progress
++ : logical_sector >= conf->reshape_progress) {
++ previous = 1;
++ } else {
++ if (mddev->reshape_backwards
++ ? logical_sector < conf->reshape_safe
++ : logical_sector >= conf->reshape_safe) {
++ spin_unlock_irq(&conf->device_lock);
++ schedule();
++ do_prepare = true;
++ goto retry;
++ }
++ }
++ spin_unlock_irq(&conf->device_lock);
++ }
++
++ new_sector = raid5_compute_sector(conf, logical_sector,
++ previous,
++ &dd_idx, NULL);
++ pr_debug("raid456: make_request, sector %llu logical %llu\n",
++ (unsigned long long)new_sector,
++ (unsigned long long)logical_sector);
++
++ sh = get_active_stripe(conf, new_sector, previous,
++ (bi->bi_rw&RWA_MASK), 0);
++ if (sh) {
++ if (unlikely(previous)) {
++ /* expansion might have moved on while waiting for a
++ * stripe, so we must do the range check again.
++ * Expansion could still move past after this
++ * test, but as we are holding a reference to
++ * 'sh', we know that if that happens,
++ * STRIPE_EXPANDING will get set and the expansion
++ * won't proceed until we finish with the stripe.
++ */
++ int must_retry = 0;
++ spin_lock_irq(&conf->device_lock);
++ if (mddev->reshape_backwards
++ ? logical_sector >= conf->reshape_progress
++ : logical_sector < conf->reshape_progress)
++ /* mismatch, need to try again */
++ must_retry = 1;
++ spin_unlock_irq(&conf->device_lock);
++ if (must_retry) {
++ release_stripe(sh);
++ schedule();
++ do_prepare = true;
++ goto retry;
++ }
++ }
++ if (read_seqcount_retry(&conf->gen_lock, seq)) {
++ /* Might have got the wrong stripe_head
++ * by accident
++ */
++ release_stripe(sh);
++ goto retry;
++ }
++
++ if (rw == WRITE &&
++ logical_sector >= mddev->suspend_lo &&
++ logical_sector < mddev->suspend_hi) {
++ release_stripe(sh);
++ /* As the suspend_* range is controlled by
++ * userspace, we want an interruptible
++ * wait.
++ */
++ flush_signals(current);
++ prepare_to_wait(&conf->wait_for_overlap,
++ &w, TASK_INTERRUPTIBLE);
++ if (logical_sector >= mddev->suspend_lo &&
++ logical_sector < mddev->suspend_hi) {
++ schedule();
++ do_prepare = true;
++ }
++ goto retry;
++ }
++
++ if (test_bit(STRIPE_EXPANDING, &sh->state) ||
++ !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
++ /* Stripe is busy expanding or
++ * add failed due to overlap. Flush everything
++ * and wait a while
++ */
++ md_wakeup_thread(mddev->thread);
++ release_stripe(sh);
++ schedule();
++ do_prepare = true;
++ goto retry;
++ }
++ set_bit(STRIPE_HANDLE, &sh->state);
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ if ((!sh->batch_head || sh == sh->batch_head) &&
++ (bi->bi_rw & REQ_SYNC) &&
++ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ atomic_inc(&conf->preread_active_stripes);
++ release_stripe_plug(mddev, sh);
++ } else {
++			/* cannot get stripe for read-ahead, just give up */
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ break;
++ }
++ }
++ finish_wait(&conf->wait_for_overlap, &w);
++
++ remaining = raid5_dec_bi_active_stripes(bi);
++ if (remaining == 0) {
++
++ if ( rw == WRITE )
++ md_write_end(mddev);
++
++ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
++ bi, 0);
++ bio_endio(bi, 0);
++ }
++}
++
++static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
++
++static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
++{
++ /* reshaping is quite different to recovery/resync so it is
++ * handled quite separately ... here.
++ *
++ * On each call to sync_request, we gather one chunk worth of
++ * destination stripes and flag them as expanding.
++ * Then we find all the source stripes and request reads.
++ * As the reads complete, handle_stripe will copy the data
++ * into the destination stripe and release that stripe.
++ */
++ struct r5conf *conf = mddev->private;
++ struct stripe_head *sh;
++ sector_t first_sector, last_sector;
++ int raid_disks = conf->previous_raid_disks;
++ int data_disks = raid_disks - conf->max_degraded;
++ int new_data_disks = conf->raid_disks - conf->max_degraded;
++ int i;
++ int dd_idx;
++ sector_t writepos, readpos, safepos;
++ sector_t stripe_addr;
++ int reshape_sectors;
++ struct list_head stripes;
++
++ if (sector_nr == 0) {
++ /* If restarting in the middle, skip the initial sectors */
++ if (mddev->reshape_backwards &&
++ conf->reshape_progress < raid5_size(mddev, 0, 0)) {
++ sector_nr = raid5_size(mddev, 0, 0)
++ - conf->reshape_progress;
++ } else if (!mddev->reshape_backwards &&
++ conf->reshape_progress > 0)
++ sector_nr = conf->reshape_progress;
++ sector_div(sector_nr, new_data_disks);
++ if (sector_nr) {
++ mddev->curr_resync_completed = sector_nr;
++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++ *skipped = 1;
++ return sector_nr;
++ }
++ }
++
++ /* We need to process a full chunk at a time.
++ * If old and new chunk sizes differ, we need to process the
++ * largest of these
++ */
++ if (mddev->new_chunk_sectors > mddev->chunk_sectors)
++ reshape_sectors = mddev->new_chunk_sectors;
++ else
++ reshape_sectors = mddev->chunk_sectors;
++
++ /* We update the metadata at least every 10 seconds, or when
++ * the data about to be copied would over-write the source of
++ * the data at the front of the range. i.e. one new_stripe
++ * along from reshape_progress new_maps to after where
++ * reshape_safe old_maps to
++ */
++ writepos = conf->reshape_progress;
++ sector_div(writepos, new_data_disks);
++ readpos = conf->reshape_progress;
++ sector_div(readpos, data_disks);
++ safepos = conf->reshape_safe;
++ sector_div(safepos, data_disks);
++ if (mddev->reshape_backwards) {
++ writepos -= min_t(sector_t, reshape_sectors, writepos);
++ readpos += reshape_sectors;
++ safepos += reshape_sectors;
++ } else {
++ writepos += reshape_sectors;
++ readpos -= min_t(sector_t, reshape_sectors, readpos);
++ safepos -= min_t(sector_t, reshape_sectors, safepos);
++ }
++
++ /* Having calculated the 'writepos' possibly use it
++ * to set 'stripe_addr' which is where we will write to.
++ */
++ if (mddev->reshape_backwards) {
++ BUG_ON(conf->reshape_progress == 0);
++ stripe_addr = writepos;
++ BUG_ON((mddev->dev_sectors &
++ ~((sector_t)reshape_sectors - 1))
++ - reshape_sectors - stripe_addr
++ != sector_nr);
++ } else {
++ BUG_ON(writepos != sector_nr + reshape_sectors);
++ stripe_addr = sector_nr;
++ }
++
++ /* 'writepos' is the most advanced device address we might write.
++ * 'readpos' is the least advanced device address we might read.
++ * 'safepos' is the least address recorded in the metadata as having
++ * been reshaped.
++ * If there is a min_offset_diff, these are adjusted either by
++ * increasing the safepos/readpos if diff is negative, or
++ * increasing writepos if diff is positive.
++ * If 'readpos' is then behind 'writepos', there is no way that we can
++ * ensure safety in the face of a crash - that must be done by userspace
++ * making a backup of the data. So in that case there is no particular
++ * rush to update metadata.
++ * Otherwise if 'safepos' is behind 'writepos', then we really need to
++ * update the metadata to advance 'safepos' to match 'readpos' so that
++ * we can be safe in the event of a crash.
++ * So we insist on updating metadata if safepos is behind writepos and
++ * readpos is beyond writepos.
++ * In any case, update the metadata every 10 seconds.
++ * Maybe that number should be configurable, but I'm not sure it is
++ * worth it.... maybe it could be a multiple of safemode_delay???
++ */
++ if (conf->min_offset_diff < 0) {
++ safepos += -conf->min_offset_diff;
++ readpos += -conf->min_offset_diff;
++ } else
++ writepos += conf->min_offset_diff;
++
++ if ((mddev->reshape_backwards
++ ? (safepos > writepos && readpos < writepos)
++ : (safepos < writepos && readpos > writepos)) ||
++ time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
++ /* Cannot proceed until we've updated the superblock... */
++ wait_event(conf->wait_for_overlap,
++ atomic_read(&conf->reshape_stripes)==0
++ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (atomic_read(&conf->reshape_stripes) != 0)
++ return 0;
++ mddev->reshape_position = conf->reshape_progress;
++ mddev->curr_resync_completed = sector_nr;
++ conf->reshape_checkpoint = jiffies;
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ md_wakeup_thread(mddev->thread);
++ wait_event(mddev->sb_wait, mddev->flags == 0 ||
++ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
++ return 0;
++ spin_lock_irq(&conf->device_lock);
++ conf->reshape_safe = mddev->reshape_position;
++ spin_unlock_irq(&conf->device_lock);
++ wake_up(&conf->wait_for_overlap);
++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++ }
++
++ INIT_LIST_HEAD(&stripes);
++ for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
++ int j;
++ int skipped_disk = 0;
++ sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
++ set_bit(STRIPE_EXPANDING, &sh->state);
++ atomic_inc(&conf->reshape_stripes);
++ /* If any of this stripe is beyond the end of the old
++ * array, then we need to zero those blocks
++ */
++ for (j=sh->disks; j--;) {
++ sector_t s;
++ if (j == sh->pd_idx)
++ continue;
++ if (conf->level == 6 &&
++ j == sh->qd_idx)
++ continue;
++ s = compute_blocknr(sh, j, 0);
++ if (s < raid5_size(mddev, 0, 0)) {
++ skipped_disk = 1;
++ continue;
++ }
++ memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
++ set_bit(R5_Expanded, &sh->dev[j].flags);
++ set_bit(R5_UPTODATE, &sh->dev[j].flags);
++ }
++ if (!skipped_disk) {
++ set_bit(STRIPE_EXPAND_READY, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ list_add(&sh->lru, &stripes);
++ }
++ spin_lock_irq(&conf->device_lock);
++ if (mddev->reshape_backwards)
++ conf->reshape_progress -= reshape_sectors * new_data_disks;
++ else
++ conf->reshape_progress += reshape_sectors * new_data_disks;
++ spin_unlock_irq(&conf->device_lock);
++	/* Ok, those stripes are ready. We can start scheduling
++ * reads on the source stripes.
++ * The source stripes are determined by mapping the first and last
++ * block on the destination stripes.
++ */
++ first_sector =
++ raid5_compute_sector(conf, stripe_addr*(new_data_disks),
++ 1, &dd_idx, NULL);
++ last_sector =
++ raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
++ * new_data_disks - 1),
++ 1, &dd_idx, NULL);
++ if (last_sector >= mddev->dev_sectors)
++ last_sector = mddev->dev_sectors - 1;
++ while (first_sector <= last_sector) {
++ sh = get_active_stripe(conf, first_sector, 1, 0, 1);
++ set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++ first_sector += STRIPE_SECTORS;
++ }
++ /* Now that the sources are clearly marked, we can release
++ * the destination stripes
++ */
++ while (!list_empty(&stripes)) {
++ sh = list_entry(stripes.next, struct stripe_head, lru);
++ list_del_init(&sh->lru);
++ release_stripe(sh);
++ }
++ /* If this takes us to the resync_max point where we have to pause,
++ * then we need to write out the superblock.
++ */
++ sector_nr += reshape_sectors;
++ if ((sector_nr - mddev->curr_resync_completed) * 2
++ >= mddev->resync_max - mddev->curr_resync_completed) {
++ /* Cannot proceed until we've updated the superblock... */
++ wait_event(conf->wait_for_overlap,
++ atomic_read(&conf->reshape_stripes) == 0
++ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (atomic_read(&conf->reshape_stripes) != 0)
++ goto ret;
++ mddev->reshape_position = conf->reshape_progress;
++ mddev->curr_resync_completed = sector_nr;
++ conf->reshape_checkpoint = jiffies;
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ md_wakeup_thread(mddev->thread);
++ wait_event(mddev->sb_wait,
++ !test_bit(MD_CHANGE_DEVS, &mddev->flags)
++ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
++ goto ret;
++ spin_lock_irq(&conf->device_lock);
++ conf->reshape_safe = mddev->reshape_position;
++ spin_unlock_irq(&conf->device_lock);
++ wake_up(&conf->wait_for_overlap);
++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++ }
++ret:
++ return reshape_sectors;
++}
++
++static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
++{
++ struct r5conf *conf = mddev->private;
++ struct stripe_head *sh;
++ sector_t max_sector = mddev->dev_sectors;
++ sector_t sync_blocks;
++ int still_degraded = 0;
++ int i;
++
++ if (sector_nr >= max_sector) {
++ /* just being told to finish up .. nothing much to do */
++
++ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
++ end_reshape(conf);
++ return 0;
++ }
++
++ if (mddev->curr_resync < max_sector) /* aborted */
++ bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
++ &sync_blocks, 1);
++ else /* completed sync */
++ conf->fullsync = 0;
++ bitmap_close_sync(mddev->bitmap);
++
++ return 0;
++ }
++
++ /* Allow raid5_quiesce to complete */
++ wait_event(conf->wait_for_overlap, conf->quiesce != 2);
++
++ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
++ return reshape_request(mddev, sector_nr, skipped);
++
++ /* No need to check resync_max as we never do more than one
++ * stripe, and as resync_max will always be on a chunk boundary,
++ * if the check in md_do_sync didn't fire, there is no chance
++ * of overstepping resync_max here
++ */
++
++	/* if there are too many failed drives and we are trying
++ * to resync, then assert that we are finished, because there is
++ * nothing we can do.
++ */
++ if (mddev->degraded >= conf->max_degraded &&
++ test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
++ sector_t rv = mddev->dev_sectors - sector_nr;
++ *skipped = 1;
++ return rv;
++ }
++ if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
++ !conf->fullsync &&
++ !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
++ sync_blocks >= STRIPE_SECTORS) {
++ /* we can skip this block, and probably more */
++ sync_blocks /= STRIPE_SECTORS;
++ *skipped = 1;
++ return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
++ }
++
++ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
++
++ sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
++ if (sh == NULL) {
++ sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
++ /* make sure we don't swamp the stripe cache if someone else
++ * is trying to get access
++ */
++ schedule_timeout_uninterruptible(1);
++ }
++	/* Need to check if the array will still be degraded after recovery/resync.
++ * Note in case of > 1 drive failures it's possible we're rebuilding
++ * one drive while leaving another faulty drive in array.
++ */
++ rcu_read_lock();
++ for (i = 0; i < conf->raid_disks; i++) {
++ struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
++
++ if (rdev == NULL || test_bit(Faulty, &rdev->flags))
++ still_degraded = 1;
++ }
++ rcu_read_unlock();
++
++ bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
++
++ set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ release_stripe(sh);
++
++ return STRIPE_SECTORS;
++}
++
++static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
++{
++ /* We may not be able to submit a whole bio at once as there
++ * may not be enough stripe_heads available.
++ * We cannot pre-allocate enough stripe_heads as we may need
++	 * more than exist in the cache (if we allow ever larger chunks).
++ * So we do one stripe head at a time and record in
++ * ->bi_hw_segments how many have been done.
++ *
++ * We *know* that this entire raid_bio is in one chunk, so
++ * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
++ */
++ struct stripe_head *sh;
++ int dd_idx;
++ sector_t sector, logical_sector, last_sector;
++ int scnt = 0;
++ int remaining;
++ int handled = 0;
++
++ logical_sector = raid_bio->bi_iter.bi_sector &
++ ~((sector_t)STRIPE_SECTORS-1);
++ sector = raid5_compute_sector(conf, logical_sector,
++ 0, &dd_idx, NULL);
++ last_sector = bio_end_sector(raid_bio);
++
++ for (; logical_sector < last_sector;
++ logical_sector += STRIPE_SECTORS,
++ sector += STRIPE_SECTORS,
++ scnt++) {
++
++ if (scnt < raid5_bi_processed_stripes(raid_bio))
++ /* already done this stripe */
++ continue;
++
++ sh = get_active_stripe(conf, sector, 0, 1, 1);
++
++ if (!sh) {
++ /* failed to get a stripe - must wait */
++ raid5_set_bi_processed_stripes(raid_bio, scnt);
++ conf->retry_read_aligned = raid_bio;
++ return handled;
++ }
++
++ if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
++ release_stripe(sh);
++ raid5_set_bi_processed_stripes(raid_bio, scnt);
++ conf->retry_read_aligned = raid_bio;
++ return handled;
++ }
++
++ set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
++ handle_stripe(sh);
++ release_stripe(sh);
++ handled++;
++ }
++ remaining = raid5_dec_bi_active_stripes(raid_bio);
++ if (remaining == 0) {
++ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
++ raid_bio, 0);
++ bio_endio(raid_bio, 0);
++ }
++ if (atomic_dec_and_test(&conf->active_aligned_reads))
++ wake_up(&conf->wait_for_stripe);
++ return handled;
++}
++
++static int handle_active_stripes(struct r5conf *conf, int group,
++ struct r5worker *worker,
++ struct list_head *temp_inactive_list)
++{
++ struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
++ int i, batch_size = 0, hash;
++ bool release_inactive = false;
++
++ while (batch_size < MAX_STRIPE_BATCH &&
++ (sh = __get_priority_stripe(conf, group)) != NULL)
++ batch[batch_size++] = sh;
++
++ if (batch_size == 0) {
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ if (!list_empty(temp_inactive_list + i))
++ break;
++ if (i == NR_STRIPE_HASH_LOCKS)
++ return batch_size;
++ release_inactive = true;
++ }
++ spin_unlock_irq(&conf->device_lock);
++
++ release_inactive_stripe_list(conf, temp_inactive_list,
++ NR_STRIPE_HASH_LOCKS);
++
++ if (release_inactive) {
++ spin_lock_irq(&conf->device_lock);
++ return 0;
++ }
++
++ for (i = 0; i < batch_size; i++)
++ handle_stripe(batch[i]);
++
++ cond_resched();
++
++ spin_lock_irq(&conf->device_lock);
++ for (i = 0; i < batch_size; i++) {
++ hash = batch[i]->hash_lock_index;
++ __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
++ }
++ return batch_size;
++}
++
++static void raid5_do_work(struct work_struct *work)
++{
++ struct r5worker *worker = container_of(work, struct r5worker, work);
++ struct r5worker_group *group = worker->group;
++ struct r5conf *conf = group->conf;
++ int group_id = group - conf->worker_groups;
++ int handled;
++ struct blk_plug plug;
++
++ pr_debug("+++ raid5worker active\n");
++
++ blk_start_plug(&plug);
++ handled = 0;
++ spin_lock_irq(&conf->device_lock);
++ while (1) {
++ int batch_size, released;
++
++ released = release_stripe_list(conf, worker->temp_inactive_list);
++
++ batch_size = handle_active_stripes(conf, group_id, worker,
++ worker->temp_inactive_list);
++ worker->working = false;
++ if (!batch_size && !released)
++ break;
++ handled += batch_size;
++ }
++ pr_debug("%d stripes handled\n", handled);
++
++ spin_unlock_irq(&conf->device_lock);
++ blk_finish_plug(&plug);
++
++ pr_debug("--- raid5worker inactive\n");
++}
++
++/*
++ * This is our raid5 kernel thread.
++ *
++ * We scan the hash table for stripes which can be handled now.
++ * During the scan, completed stripes are saved for us by the interrupt
++ * handler, so that they will not have to wait for our next wakeup.
++ */
++static void raid5d(struct md_thread *thread)
++{
++ struct mddev *mddev = thread->mddev;
++ struct r5conf *conf = mddev->private;
++ int handled;
++ struct blk_plug plug;
++
++ pr_debug("+++ raid5d active\n");
++
++ md_check_recovery(mddev);
++
++ blk_start_plug(&plug);
++ handled = 0;
++ spin_lock_irq(&conf->device_lock);
++ while (1) {
++ struct bio *bio;
++ int batch_size, released;
++
++ released = release_stripe_list(conf, conf->temp_inactive_list);
++ if (released)
++ clear_bit(R5_DID_ALLOC, &conf->cache_state);
++
++		if (!list_empty(&conf->bitmap_list)) {
++ /* Now is a good time to flush some bitmap updates */
++ conf->seq_flush++;
++ spin_unlock_irq(&conf->device_lock);
++ bitmap_unplug(mddev->bitmap);
++ spin_lock_irq(&conf->device_lock);
++ conf->seq_write = conf->seq_flush;
++ activate_bit_delay(conf, conf->temp_inactive_list);
++ }
++ raid5_activate_delayed(conf);
++
++ while ((bio = remove_bio_from_retry(conf))) {
++ int ok;
++ spin_unlock_irq(&conf->device_lock);
++ ok = retry_aligned_read(conf, bio);
++ spin_lock_irq(&conf->device_lock);
++ if (!ok)
++ break;
++ handled++;
++ }
++
++ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
++ conf->temp_inactive_list);
++ if (!batch_size && !released)
++ break;
++ handled += batch_size;
++
++ if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
++ spin_unlock_irq(&conf->device_lock);
++ md_check_recovery(mddev);
++ spin_lock_irq(&conf->device_lock);
++ }
++ }
++ pr_debug("%d stripes handled\n", handled);
++
++ spin_unlock_irq(&conf->device_lock);
++ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
++ mutex_trylock(&conf->cache_size_mutex)) {
++ grow_one_stripe(conf, __GFP_NOWARN);
++ /* Set flag even if allocation failed. This helps
++ * slow down allocation requests when mem is short
++ */
++ set_bit(R5_DID_ALLOC, &conf->cache_state);
++ mutex_unlock(&conf->cache_size_mutex);
++ }
++
++ async_tx_issue_pending_all();
++ blk_finish_plug(&plug);
++
++ pr_debug("--- raid5d inactive\n");
++}
++
++static ssize_t
++raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf;
++ int ret = 0;
++ spin_lock(&mddev->lock);
++ conf = mddev->private;
++ if (conf)
++ ret = sprintf(page, "%d\n", conf->min_nr_stripes);
++ spin_unlock(&mddev->lock);
++ return ret;
++}
++
++int
++raid5_set_cache_size(struct mddev *mddev, int size)
++{
++ struct r5conf *conf = mddev->private;
++ int err;
++
++ if (size <= 16 || size > 32768)
++ return -EINVAL;
++
++ conf->min_nr_stripes = size;
++ mutex_lock(&conf->cache_size_mutex);
++ while (size < conf->max_nr_stripes &&
++ drop_one_stripe(conf))
++ ;
++ mutex_unlock(&conf->cache_size_mutex);
++
++
++ err = md_allow_write(mddev);
++ if (err)
++ return err;
++
++ mutex_lock(&conf->cache_size_mutex);
++ while (size > conf->max_nr_stripes)
++ if (!grow_one_stripe(conf, GFP_KERNEL))
++ break;
++ mutex_unlock(&conf->cache_size_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL(raid5_set_cache_size);
++
++static ssize_t
++raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf;
++ unsigned long new;
++ int err;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++ err = mddev_lock(mddev);
++ if (err)
++ return err;
++ conf = mddev->private;
++ if (!conf)
++ err = -ENODEV;
++ else
++ err = raid5_set_cache_size(mddev, new);
++ mddev_unlock(mddev);
++
++ return err ?: len;
++}
++
++static struct md_sysfs_entry
++raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
++ raid5_show_stripe_cache_size,
++ raid5_store_stripe_cache_size);
++
++static ssize_t
++raid5_show_rmw_level(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", conf->rmw_level);
++ else
++ return 0;
++}
++
++static ssize_t
++raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf = mddev->private;
++ unsigned long new;
++
++ if (!conf)
++ return -ENODEV;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++
++ if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
++ return -EINVAL;
++
++ if (new != PARITY_DISABLE_RMW &&
++ new != PARITY_ENABLE_RMW &&
++ new != PARITY_PREFER_RMW)
++ return -EINVAL;
++
++ conf->rmw_level = new;
++ return len;
++}
++
++static struct md_sysfs_entry
++raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
++ raid5_show_rmw_level,
++ raid5_store_rmw_level);
++
++
++static ssize_t
++raid5_show_preread_threshold(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf;
++ int ret = 0;
++ spin_lock(&mddev->lock);
++ conf = mddev->private;
++ if (conf)
++ ret = sprintf(page, "%d\n", conf->bypass_threshold);
++ spin_unlock(&mddev->lock);
++ return ret;
++}
++
++static ssize_t
++raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf;
++ unsigned long new;
++ int err;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++
++ err = mddev_lock(mddev);
++ if (err)
++ return err;
++ conf = mddev->private;
++ if (!conf)
++ err = -ENODEV;
++ else if (new > conf->min_nr_stripes)
++ err = -EINVAL;
++ else
++ conf->bypass_threshold = new;
++ mddev_unlock(mddev);
++ return err ?: len;
++}
++
++static struct md_sysfs_entry
++raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
++ S_IRUGO | S_IWUSR,
++ raid5_show_preread_threshold,
++ raid5_store_preread_threshold);
++
++static ssize_t
++raid5_show_skip_copy(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf;
++ int ret = 0;
++ spin_lock(&mddev->lock);
++ conf = mddev->private;
++ if (conf)
++ ret = sprintf(page, "%d\n", conf->skip_copy);
++ spin_unlock(&mddev->lock);
++ return ret;
++}
++
++static ssize_t
++raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf;
++ unsigned long new;
++ int err;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++ new = !!new;
++
++ err = mddev_lock(mddev);
++ if (err)
++ return err;
++ conf = mddev->private;
++ if (!conf)
++ err = -ENODEV;
++ else if (new != conf->skip_copy) {
++ mddev_suspend(mddev);
++ conf->skip_copy = new;
++ if (new)
++ mddev->queue->backing_dev_info.capabilities |=
++ BDI_CAP_STABLE_WRITES;
++ else
++ mddev->queue->backing_dev_info.capabilities &=
++ ~BDI_CAP_STABLE_WRITES;
++ mddev_resume(mddev);
++ }
++ mddev_unlock(mddev);
++ return err ?: len;
++}
++
++static struct md_sysfs_entry
++raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
++ raid5_show_skip_copy,
++ raid5_store_skip_copy);
++
++static ssize_t
++stripe_cache_active_show(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
++ else
++ return 0;
++}
++
++static struct md_sysfs_entry
++raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
++
++static ssize_t
++raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf;
++ int ret = 0;
++ spin_lock(&mddev->lock);
++ conf = mddev->private;
++ if (conf)
++ ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
++ spin_unlock(&mddev->lock);
++ return ret;
++}
++
++static int alloc_thread_groups(struct r5conf *conf, int cnt,
++ int *group_cnt,
++ int *worker_cnt_per_group,
++ struct r5worker_group **worker_groups);
++static ssize_t
++raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf;
++ unsigned long new;
++ int err;
++ struct r5worker_group *new_groups, *old_groups;
++ int group_cnt, worker_cnt_per_group;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++
++ err = mddev_lock(mddev);
++ if (err)
++ return err;
++ conf = mddev->private;
++ if (!conf)
++ err = -ENODEV;
++ else if (new != conf->worker_cnt_per_group) {
++ mddev_suspend(mddev);
++
++ old_groups = conf->worker_groups;
++ if (old_groups)
++ flush_workqueue(raid5_wq);
++
++ err = alloc_thread_groups(conf, new,
++ &group_cnt, &worker_cnt_per_group,
++ &new_groups);
++ if (!err) {
++ spin_lock_irq(&conf->device_lock);
++ conf->group_cnt = group_cnt;
++ conf->worker_cnt_per_group = worker_cnt_per_group;
++ conf->worker_groups = new_groups;
++ spin_unlock_irq(&conf->device_lock);
++
++ if (old_groups)
++ kfree(old_groups[0].workers);
++ kfree(old_groups);
++ }
++ mddev_resume(mddev);
++ }
++ mddev_unlock(mddev);
++
++ return err ?: len;
++}
++
++static struct md_sysfs_entry
++raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
++ raid5_show_group_thread_cnt,
++ raid5_store_group_thread_cnt);
++
++static struct attribute *raid5_attrs[] = {
++ &raid5_stripecache_size.attr,
++ &raid5_stripecache_active.attr,
++ &raid5_preread_bypass_threshold.attr,
++ &raid5_group_thread_cnt.attr,
++ &raid5_skip_copy.attr,
++ &raid5_rmw_level.attr,
++ NULL,
++};
++static struct attribute_group raid5_attrs_group = {
++ .name = NULL,
++ .attrs = raid5_attrs,
++};
++
++static int alloc_thread_groups(struct r5conf *conf, int cnt,
++ int *group_cnt,
++ int *worker_cnt_per_group,
++ struct r5worker_group **worker_groups)
++{
++ int i, j, k;
++ ssize_t size;
++ struct r5worker *workers;
++
++ *worker_cnt_per_group = cnt;
++ if (cnt == 0) {
++ *group_cnt = 0;
++ *worker_groups = NULL;
++ return 0;
++ }
++ *group_cnt = num_possible_nodes();
++ size = sizeof(struct r5worker) * cnt;
++ workers = kzalloc(size * *group_cnt, GFP_NOIO);
++ *worker_groups = kzalloc(sizeof(struct r5worker_group) *
++ *group_cnt, GFP_NOIO);
++ if (!*worker_groups || !workers) {
++ kfree(workers);
++ kfree(*worker_groups);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < *group_cnt; i++) {
++ struct r5worker_group *group;
++
++ group = &(*worker_groups)[i];
++ INIT_LIST_HEAD(&group->handle_list);
++ group->conf = conf;
++ group->workers = workers + i * cnt;
++
++ for (j = 0; j < cnt; j++) {
++ struct r5worker *worker = group->workers + j;
++ worker->group = group;
++ INIT_WORK(&worker->work, raid5_do_work);
++
++ for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
++ INIT_LIST_HEAD(worker->temp_inactive_list + k);
++ }
++ }
++
++ return 0;
++}
++
++static void free_thread_groups(struct r5conf *conf)
++{
++ if (conf->worker_groups)
++ kfree(conf->worker_groups[0].workers);
++ kfree(conf->worker_groups);
++ conf->worker_groups = NULL;
++}
++
++static sector_t
++raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
++{
++ struct r5conf *conf = mddev->private;
++
++ if (!sectors)
++ sectors = mddev->dev_sectors;
++ if (!raid_disks)
++ /* size is defined by the smallest of previous and new size */
++ raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
++
++ sectors &= ~((sector_t)mddev->chunk_sectors - 1);
++ sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
++ return sectors * (raid_disks - conf->max_degraded);
++}
++
++static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ safe_put_page(percpu->spare_page);
++ if (percpu->scribble)
++ flex_array_free(percpu->scribble);
++ percpu->spare_page = NULL;
++ percpu->scribble = NULL;
++}
++
++static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ if (conf->level == 6 && !percpu->spare_page)
++ percpu->spare_page = alloc_page(GFP_KERNEL);
++ if (!percpu->scribble)
++ percpu->scribble = scribble_alloc(max(conf->raid_disks,
++ conf->previous_raid_disks),
++ max(conf->chunk_sectors,
++ conf->prev_chunk_sectors)
++ / STRIPE_SECTORS,
++ GFP_KERNEL);
++
++ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
++ free_scratch_buffer(conf, percpu);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void raid5_free_percpu(struct r5conf *conf)
++{
++ unsigned long cpu;
++
++ if (!conf->percpu)
++ return;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ unregister_cpu_notifier(&conf->cpu_notify);
++#endif
++
++ get_online_cpus();
++ for_each_possible_cpu(cpu)
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ put_online_cpus();
++
++ free_percpu(conf->percpu);
++}
++
++static void free_conf(struct r5conf *conf)
++{
++ if (conf->shrinker.seeks)
++ unregister_shrinker(&conf->shrinker);
++ free_thread_groups(conf);
++ shrink_stripes(conf);
++ raid5_free_percpu(conf);
++ kfree(conf->disks);
++ kfree(conf->stripe_hashtbl);
++ kfree(conf);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
++ void *hcpu)
++{
++ struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
++ long cpu = (long)hcpu;
++ struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ if (alloc_scratch_buffer(conf, percpu)) {
++ pr_err("%s: failed memory allocation for cpu%ld\n",
++ __func__, cpu);
++ return notifier_from_errno(-ENOMEM);
++ }
++ break;
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ break;
++ default:
++ break;
++ }
++ return NOTIFY_OK;
++}
++#endif
++
++static int raid5_alloc_percpu(struct r5conf *conf)
++{
++ unsigned long cpu;
++ int err = 0;
++
++ conf->percpu = alloc_percpu(struct raid5_percpu);
++ if (!conf->percpu)
++ return -ENOMEM;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ conf->cpu_notify.notifier_call = raid456_cpu_notify;
++ conf->cpu_notify.priority = 0;
++ err = register_cpu_notifier(&conf->cpu_notify);
++ if (err)
++ return err;
++#endif
++
++ get_online_cpus();
++ for_each_present_cpu(cpu) {
++ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ if (err) {
++ pr_err("%s: failed memory allocation for cpu%ld\n",
++ __func__, cpu);
++ break;
++ }
++ }
++ put_online_cpus();
++
++ return err;
++}
++
++static unsigned long raid5_cache_scan(struct shrinker *shrink,
++ struct shrink_control *sc)
++{
++ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
++ unsigned long ret = SHRINK_STOP;
++
++ if (mutex_trylock(&conf->cache_size_mutex)) {
++		ret = 0;
++ while (ret < sc->nr_to_scan &&
++ conf->max_nr_stripes > conf->min_nr_stripes) {
++ if (drop_one_stripe(conf) == 0) {
++ ret = SHRINK_STOP;
++ break;
++ }
++ ret++;
++ }
++ mutex_unlock(&conf->cache_size_mutex);
++ }
++ return ret;
++}
++
++static unsigned long raid5_cache_count(struct shrinker *shrink,
++ struct shrink_control *sc)
++{
++ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
++
++ if (conf->max_nr_stripes < conf->min_nr_stripes)
++ /* unlikely, but not impossible */
++ return 0;
++ return conf->max_nr_stripes - conf->min_nr_stripes;
++}
++
++static struct r5conf *setup_conf(struct mddev *mddev)
++{
++ struct r5conf *conf;
++ int raid_disk, memory, max_disks;
++ struct md_rdev *rdev;
++ struct disk_info *disk;
++ char pers_name[6];
++ int i;
++ int group_cnt, worker_cnt_per_group;
++ struct r5worker_group *new_group;
++
++ if (mddev->new_level != 5
++ && mddev->new_level != 4
++ && mddev->new_level != 6) {
++ printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
++ mdname(mddev), mddev->new_level);
++ return ERR_PTR(-EIO);
++ }
++ if ((mddev->new_level == 5
++ && !algorithm_valid_raid5(mddev->new_layout)) ||
++ (mddev->new_level == 6
++ && !algorithm_valid_raid6(mddev->new_layout))) {
++ printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
++ mdname(mddev), mddev->new_layout);
++ return ERR_PTR(-EIO);
++ }
++ if (mddev->new_level == 6 && mddev->raid_disks < 4) {
++ printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
++ mdname(mddev), mddev->raid_disks);
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (!mddev->new_chunk_sectors ||
++ (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
++ !is_power_of_2(mddev->new_chunk_sectors)) {
++ printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
++ mdname(mddev), mddev->new_chunk_sectors << 9);
++ return ERR_PTR(-EINVAL);
++ }
++
++ conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
++ if (conf == NULL)
++ goto abort;
++	/* Don't enable multi-threading by default */
++ if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
++ &new_group)) {
++ conf->group_cnt = group_cnt;
++ conf->worker_cnt_per_group = worker_cnt_per_group;
++ conf->worker_groups = new_group;
++ } else
++ goto abort;
++ spin_lock_init(&conf->device_lock);
++ seqcount_init(&conf->gen_lock);
++ mutex_init(&conf->cache_size_mutex);
++ init_waitqueue_head(&conf->wait_for_stripe);
++ init_waitqueue_head(&conf->wait_for_overlap);
++ INIT_LIST_HEAD(&conf->handle_list);
++ INIT_LIST_HEAD(&conf->hold_list);
++ INIT_LIST_HEAD(&conf->delayed_list);
++ INIT_LIST_HEAD(&conf->bitmap_list);
++ init_llist_head(&conf->released_stripes);
++ atomic_set(&conf->active_stripes, 0);
++ atomic_set(&conf->preread_active_stripes, 0);
++ atomic_set(&conf->active_aligned_reads, 0);
++ conf->bypass_threshold = BYPASS_THRESHOLD;
++ conf->recovery_disabled = mddev->recovery_disabled - 1;
++
++ conf->raid_disks = mddev->raid_disks;
++ if (mddev->reshape_position == MaxSector)
++ conf->previous_raid_disks = mddev->raid_disks;
++ else
++ conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
++ max_disks = max(conf->raid_disks, conf->previous_raid_disks);
++
++ conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
++ GFP_KERNEL);
++ if (!conf->disks)
++ goto abort;
++
++ conf->mddev = mddev;
++
++ if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
++ goto abort;
++
++	/* We init hash_locks[0] separately so that it can be used
++ * as the reference lock in the spin_lock_nest_lock() call
++ * in lock_all_device_hash_locks_irq in order to convince
++ * lockdep that we know what we are doing.
++ */
++ spin_lock_init(conf->hash_locks);
++ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
++ spin_lock_init(conf->hash_locks + i);
++
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ INIT_LIST_HEAD(conf->inactive_list + i);
++
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ INIT_LIST_HEAD(conf->temp_inactive_list + i);
++
++ conf->level = mddev->new_level;
++ conf->chunk_sectors = mddev->new_chunk_sectors;
++ if (raid5_alloc_percpu(conf) != 0)
++ goto abort;
++
++ pr_debug("raid456: run(%s) called.\n", mdname(mddev));
++
++ rdev_for_each(rdev, mddev) {
++ raid_disk = rdev->raid_disk;
++ if (raid_disk >= max_disks
++ || raid_disk < 0)
++ continue;
++ disk = conf->disks + raid_disk;
++
++ if (test_bit(Replacement, &rdev->flags)) {
++ if (disk->replacement)
++ goto abort;
++ disk->replacement = rdev;
++ } else {
++ if (disk->rdev)
++ goto abort;
++ disk->rdev = rdev;
++ }
++
++ if (test_bit(In_sync, &rdev->flags)) {
++ char b[BDEVNAME_SIZE];
++ printk(KERN_INFO "md/raid:%s: device %s operational as raid"
++ " disk %d\n",
++ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
++ } else if (rdev->saved_raid_disk != raid_disk)
++ /* Cannot rely on bitmap to complete recovery */
++ conf->fullsync = 1;
++ }
++
++ conf->level = mddev->new_level;
++ if (conf->level == 6) {
++ conf->max_degraded = 2;
++ if (raid6_call.xor_syndrome)
++ conf->rmw_level = PARITY_ENABLE_RMW;
++ else
++ conf->rmw_level = PARITY_DISABLE_RMW;
++ } else {
++ conf->max_degraded = 1;
++ conf->rmw_level = PARITY_ENABLE_RMW;
++ }
++ conf->algorithm = mddev->new_layout;
++ conf->reshape_progress = mddev->reshape_position;
++ if (conf->reshape_progress != MaxSector) {
++ conf->prev_chunk_sectors = mddev->chunk_sectors;
++ conf->prev_algo = mddev->layout;
++ }
++
++ conf->min_nr_stripes = NR_STRIPES;
++ memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
++ max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
++ atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
++ if (grow_stripes(conf, conf->min_nr_stripes)) {
++ printk(KERN_ERR
++ "md/raid:%s: couldn't allocate %dkB for buffers\n",
++ mdname(mddev), memory);
++ goto abort;
++ } else
++ printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
++ mdname(mddev), memory);
++ /*
++	 * Losing a stripe head costs more than the time to refill it;
++ * it reduces the queue depth and so can hurt throughput.
++ * So set it rather large, scaled by number of devices.
++ */
++ conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
++ conf->shrinker.scan_objects = raid5_cache_scan;
++ conf->shrinker.count_objects = raid5_cache_count;
++ conf->shrinker.batch = 128;
++ conf->shrinker.flags = 0;
++ register_shrinker(&conf->shrinker);
++
++ sprintf(pers_name, "raid%d", mddev->new_level);
++ conf->thread = md_register_thread(raid5d, mddev, pers_name);
++ if (!conf->thread) {
++ printk(KERN_ERR
++ "md/raid:%s: couldn't allocate thread.\n",
++ mdname(mddev));
++ goto abort;
++ }
++
++ return conf;
++
++ abort:
++ if (conf) {
++ free_conf(conf);
++ return ERR_PTR(-EIO);
++ } else
++ return ERR_PTR(-ENOMEM);
++}
++
++static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
++{
++ switch (algo) {
++ case ALGORITHM_PARITY_0:
++ if (raid_disk < max_degraded)
++ return 1;
++ break;
++ case ALGORITHM_PARITY_N:
++ if (raid_disk >= raid_disks - max_degraded)
++ return 1;
++ break;
++ case ALGORITHM_PARITY_0_6:
++ if (raid_disk == 0 ||
++ raid_disk == raid_disks - 1)
++ return 1;
++ break;
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ if (raid_disk == raid_disks - 1)
++ return 1;
++ }
++ return 0;
++}
++
++static int run(struct mddev *mddev)
++{
++ struct r5conf *conf;
++ int working_disks = 0;
++ int dirty_parity_disks = 0;
++ struct md_rdev *rdev;
++ sector_t reshape_offset = 0;
++ int i;
++ long long min_offset_diff = 0;
++ int first = 1;
++
++ if (mddev->recovery_cp != MaxSector)
++ printk(KERN_NOTICE "md/raid:%s: not clean"
++ " -- starting background reconstruction\n",
++ mdname(mddev));
++
++ rdev_for_each(rdev, mddev) {
++ long long diff;
++ if (rdev->raid_disk < 0)
++ continue;
++ diff = (rdev->new_data_offset - rdev->data_offset);
++ if (first) {
++ min_offset_diff = diff;
++ first = 0;
++ } else if (mddev->reshape_backwards &&
++ diff < min_offset_diff)
++ min_offset_diff = diff;
++ else if (!mddev->reshape_backwards &&
++ diff > min_offset_diff)
++ min_offset_diff = diff;
++ }
++
++ if (mddev->reshape_position != MaxSector) {
++ /* Check that we can continue the reshape.
++ * Difficulties arise if the stripe we would write to
++ * next is at or after the stripe we would read from next.
++ * For a reshape that changes the number of devices, this
++ * is only possible for a very short time, and mdadm makes
++		 * sure that time appears to have passed before assembling
++ * the array. So we fail if that time hasn't passed.
++ * For a reshape that keeps the number of devices the same
++		 * mdadm must be monitoring the reshape and keeping the
++ * critical areas read-only and backed up. It will start
++ * the array in read-only mode, so we check for that.
++ */
++ sector_t here_new, here_old;
++ int old_disks;
++ int max_degraded = (mddev->level == 6 ? 2 : 1);
++
++ if (mddev->new_level != mddev->level) {
++ printk(KERN_ERR "md/raid:%s: unsupported reshape "
++ "required - aborting.\n",
++ mdname(mddev));
++ return -EINVAL;
++ }
++ old_disks = mddev->raid_disks - mddev->delta_disks;
++ /* reshape_position must be on a new-stripe boundary, and one
++ * further up in new geometry must map after here in old
++ * geometry.
++ */
++ here_new = mddev->reshape_position;
++ if (sector_div(here_new, mddev->new_chunk_sectors *
++ (mddev->raid_disks - max_degraded))) {
++ printk(KERN_ERR "md/raid:%s: reshape_position not "
++ "on a stripe boundary\n", mdname(mddev));
++ return -EINVAL;
++ }
++ reshape_offset = here_new * mddev->new_chunk_sectors;
++ /* here_new is the stripe we will write to */
++ here_old = mddev->reshape_position;
++ sector_div(here_old, mddev->chunk_sectors *
++ (old_disks-max_degraded));
++ /* here_old is the first stripe that we might need to read
++ * from */
++ if (mddev->delta_disks == 0) {
++ if ((here_new * mddev->new_chunk_sectors !=
++ here_old * mddev->chunk_sectors)) {
++ printk(KERN_ERR "md/raid:%s: reshape position is"
++ " confused - aborting\n", mdname(mddev));
++ return -EINVAL;
++ }
++ /* We cannot be sure it is safe to start an in-place
++ * reshape. It is only safe if user-space is monitoring
++ * and taking constant backups.
++ * mdadm always starts a situation like this in
++ * readonly mode so it can take control before
++ * allowing any writes. So just check for that.
++ */
++ if (abs(min_offset_diff) >= mddev->chunk_sectors &&
++ abs(min_offset_diff) >= mddev->new_chunk_sectors)
++ /* not really in-place - so OK */;
++ else if (mddev->ro == 0) {
++ printk(KERN_ERR "md/raid:%s: in-place reshape "
++ "must be started in read-only mode "
++ "- aborting\n",
++ mdname(mddev));
++ return -EINVAL;
++ }
++ } else if (mddev->reshape_backwards
++ ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
++ here_old * mddev->chunk_sectors)
++ : (here_new * mddev->new_chunk_sectors >=
++ here_old * mddev->chunk_sectors + (-min_offset_diff))) {
++ /* Reading from the same stripe as writing to - bad */
++ printk(KERN_ERR "md/raid:%s: reshape_position too early for "
++ "auto-recovery - aborting.\n",
++ mdname(mddev));
++ return -EINVAL;
++ }
++ printk(KERN_INFO "md/raid:%s: reshape will continue\n",
++ mdname(mddev));
++ /* OK, we should be able to continue; */
++ } else {
++ BUG_ON(mddev->level != mddev->new_level);
++ BUG_ON(mddev->layout != mddev->new_layout);
++ BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
++ BUG_ON(mddev->delta_disks != 0);
++ }
++
++ if (mddev->private == NULL)
++ conf = setup_conf(mddev);
++ else
++ conf = mddev->private;
++
++ if (IS_ERR(conf))
++ return PTR_ERR(conf);
++
++ conf->min_offset_diff = min_offset_diff;
++ mddev->thread = conf->thread;
++ conf->thread = NULL;
++ mddev->private = conf;
++
++ for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
++ i++) {
++ rdev = conf->disks[i].rdev;
++ if (!rdev && conf->disks[i].replacement) {
++ /* The replacement is all we have yet */
++ rdev = conf->disks[i].replacement;
++ conf->disks[i].replacement = NULL;
++ clear_bit(Replacement, &rdev->flags);
++ conf->disks[i].rdev = rdev;
++ }
++ if (!rdev)
++ continue;
++ if (conf->disks[i].replacement &&
++ conf->reshape_progress != MaxSector) {
++ /* replacements and reshape simply do not mix. */
++ printk(KERN_ERR "md: cannot handle concurrent "
++ "replacement and reshape.\n");
++ goto abort;
++ }
++ if (test_bit(In_sync, &rdev->flags)) {
++ working_disks++;
++ continue;
++ }
++ /* This disc is not fully in-sync. However if it
++ * just stored parity (beyond the recovery_offset),
++		 * then we don't need to be concerned about the
++ * array being dirty.
++ * When reshape goes 'backwards', we never have
++ * partially completed devices, so we only need
++ * to worry about reshape going forwards.
++ */
++ /* Hack because v0.91 doesn't store recovery_offset properly. */
++ if (mddev->major_version == 0 &&
++ mddev->minor_version > 90)
++ rdev->recovery_offset = reshape_offset;
++
++ if (rdev->recovery_offset < reshape_offset) {
++ /* We need to check old and new layout */
++ if (!only_parity(rdev->raid_disk,
++ conf->algorithm,
++ conf->raid_disks,
++ conf->max_degraded))
++ continue;
++ }
++ if (!only_parity(rdev->raid_disk,
++ conf->prev_algo,
++ conf->previous_raid_disks,
++ conf->max_degraded))
++ continue;
++ dirty_parity_disks++;
++ }
++
++ /*
++ * 0 for a fully functional array, 1 or 2 for a degraded array.
++ */
++ mddev->degraded = calc_degraded(conf);
++
++ if (has_failed(conf)) {
++ printk(KERN_ERR "md/raid:%s: not enough operational devices"
++ " (%d/%d failed)\n",
++ mdname(mddev), mddev->degraded, conf->raid_disks);
++ goto abort;
++ }
++
++ /* device size must be a multiple of chunk size */
++ mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
++ mddev->resync_max_sectors = mddev->dev_sectors;
++
++ if (mddev->degraded > dirty_parity_disks &&
++ mddev->recovery_cp != MaxSector) {
++ if (mddev->ok_start_degraded)
++ printk(KERN_WARNING
++ "md/raid:%s: starting dirty degraded array"
++ " - data corruption possible.\n",
++ mdname(mddev));
++ else {
++ printk(KERN_ERR
++ "md/raid:%s: cannot start dirty degraded array.\n",
++ mdname(mddev));
++ goto abort;
++ }
++ }
++
++ if (mddev->degraded == 0)
++ printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
++ " devices, algorithm %d\n", mdname(mddev), conf->level,
++ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
++ mddev->new_layout);
++ else
++ printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
++ " out of %d devices, algorithm %d\n",
++ mdname(mddev), conf->level,
++ mddev->raid_disks - mddev->degraded,
++ mddev->raid_disks, mddev->new_layout);
++
++ print_raid5_conf(conf);
++
++ if (conf->reshape_progress != MaxSector) {
++ conf->reshape_safe = conf->reshape_progress;
++ atomic_set(&conf->reshape_stripes, 0);
++ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
++ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
++ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
++ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
++ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
++ "reshape");
++ }
++
++ /* Ok, everything is just fine now */
++ if (mddev->to_remove == &raid5_attrs_group)
++ mddev->to_remove = NULL;
++ else if (mddev->kobj.sd &&
++ sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
++ printk(KERN_WARNING
++ "raid5: failed to create sysfs attributes for %s\n",
++ mdname(mddev));
++ md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
++
++ if (mddev->queue) {
++ int chunk_size;
++ bool discard_supported = true;
++ /* read-ahead size must cover two whole stripes, which
++		 * is 2 * (number of data disks) * chunksize
++ */
++ int data_disks = conf->previous_raid_disks - conf->max_degraded;
++ int stripe = data_disks *
++ ((mddev->chunk_sectors << 9) / PAGE_SIZE);
++ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
++ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
++
++ chunk_size = mddev->chunk_sectors << 9;
++ blk_queue_io_min(mddev->queue, chunk_size);
++ blk_queue_io_opt(mddev->queue, chunk_size *
++ (conf->raid_disks - conf->max_degraded));
++ mddev->queue->limits.raid_partial_stripes_expensive = 1;
++ /*
++ * We can only discard a whole stripe. It doesn't make sense to
++ * discard data disk but write parity disk
++ */
++ stripe = stripe * PAGE_SIZE;
++ /* Round up to power of 2, as discard handling
++ * currently assumes that */
++ while ((stripe-1) & stripe)
++ stripe = (stripe | (stripe-1)) + 1;
++ mddev->queue->limits.discard_alignment = stripe;
++ mddev->queue->limits.discard_granularity = stripe;
++ /*
++ * unaligned part of discard request will be ignored, so can't
++ * guarantee discard_zeroes_data
++ */
++ mddev->queue->limits.discard_zeroes_data = 0;
++
++ blk_queue_max_write_same_sectors(mddev->queue, 0);
++
++ rdev_for_each(rdev, mddev) {
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->data_offset << 9);
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->new_data_offset << 9);
++ /*
++ * discard_zeroes_data is required, otherwise data
++ * could be lost. Consider a scenario: discard a stripe
++ * (the stripe could be inconsistent if
++ * discard_zeroes_data is 0); write one disk of the
++ * stripe (the stripe could be inconsistent again
++ * depending on which disks are used to calculate
++ * parity); the disk is broken; The stripe data of this
++ * disk is lost.
++ */
++ if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
++ !bdev_get_queue(rdev->bdev)->
++ limits.discard_zeroes_data)
++ discard_supported = false;
++ /* Unfortunately, discard_zeroes_data is not currently
++ * a guarantee - just a hint. So we only allow DISCARD
++ * if the sysadmin has confirmed that only safe devices
++ * are in use by setting a module parameter.
++ */
++ if (!devices_handle_discard_safely) {
++ if (discard_supported) {
++ pr_info("md/raid456: discard support disabled due to uncertainty.\n");
++ pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
++ }
++ discard_supported = false;
++ }
++ }
++
++ if (discard_supported &&
++ mddev->queue->limits.max_discard_sectors >= stripe &&
++ mddev->queue->limits.discard_granularity >= stripe)
++ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
++ mddev->queue);
++ else
++ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
++ mddev->queue);
++ }
++
++ return 0;
++abort:
++ md_unregister_thread(&mddev->thread);
++ print_raid5_conf(conf);
++ free_conf(conf);
++ mddev->private = NULL;
++ printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
++ return -EIO;
++}
++
++static void raid5_free(struct mddev *mddev, void *priv)
++{
++ struct r5conf *conf = priv;
++
++ free_conf(conf);
++ mddev->to_remove = &raid5_attrs_group;
++}
++
++static void status(struct seq_file *seq, struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++ int i;
++
++ seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
++ mddev->chunk_sectors / 2, mddev->layout);
++ seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
++ for (i = 0; i < conf->raid_disks; i++)
++ seq_printf (seq, "%s",
++ conf->disks[i].rdev &&
++ test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
++ seq_printf (seq, "]");
++}
++
++static void print_raid5_conf (struct r5conf *conf)
++{
++ int i;
++ struct disk_info *tmp;
++
++ printk(KERN_DEBUG "RAID conf printout:\n");
++ if (!conf) {
++ printk("(conf==NULL)\n");
++ return;
++ }
++ printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
++ conf->raid_disks,
++ conf->raid_disks - conf->mddev->degraded);
++
++ for (i = 0; i < conf->raid_disks; i++) {
++ char b[BDEVNAME_SIZE];
++ tmp = conf->disks + i;
++ if (tmp->rdev)
++ printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
++ i, !test_bit(Faulty, &tmp->rdev->flags),
++ bdevname(tmp->rdev->bdev, b));
++ }
++}
++
++static int raid5_spare_active(struct mddev *mddev)
++{
++ int i;
++ struct r5conf *conf = mddev->private;
++ struct disk_info *tmp;
++ int count = 0;
++ unsigned long flags;
++
++ for (i = 0; i < conf->raid_disks; i++) {
++ tmp = conf->disks + i;
++ if (tmp->replacement
++ && tmp->replacement->recovery_offset == MaxSector
++ && !test_bit(Faulty, &tmp->replacement->flags)
++ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
++ /* Replacement has just become active. */
++ if (!tmp->rdev
++ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
++ count++;
++ if (tmp->rdev) {
++ /* Replaced device not technically faulty,
++ * but we need to be sure it gets removed
++ * and never re-added.
++ */
++ set_bit(Faulty, &tmp->rdev->flags);
++ sysfs_notify_dirent_safe(
++ tmp->rdev->sysfs_state);
++ }
++ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
++ } else if (tmp->rdev
++ && tmp->rdev->recovery_offset == MaxSector
++ && !test_bit(Faulty, &tmp->rdev->flags)
++ && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
++ count++;
++ sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
++ }
++ }
++ spin_lock_irqsave(&conf->device_lock, flags);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ print_raid5_conf(conf);
++ return count;
++}
++
++static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
++{
++ struct r5conf *conf = mddev->private;
++ int err = 0;
++ int number = rdev->raid_disk;
++ struct md_rdev **rdevp;
++ struct disk_info *p = conf->disks + number;
++
++ print_raid5_conf(conf);
++ if (rdev == p->rdev)
++ rdevp = &p->rdev;
++ else if (rdev == p->replacement)
++ rdevp = &p->replacement;
++ else
++ return 0;
++
++ if (number >= conf->raid_disks &&
++ conf->reshape_progress == MaxSector)
++ clear_bit(In_sync, &rdev->flags);
++
++ if (test_bit(In_sync, &rdev->flags) ||
++ atomic_read(&rdev->nr_pending)) {
++ err = -EBUSY;
++ goto abort;
++ }
++ /* Only remove non-faulty devices if recovery
++ * isn't possible.
++ */
++ if (!test_bit(Faulty, &rdev->flags) &&
++ mddev->recovery_disabled != conf->recovery_disabled &&
++ !has_failed(conf) &&
++ (!p->replacement || p->replacement == rdev) &&
++ number < conf->raid_disks) {
++ err = -EBUSY;
++ goto abort;
++ }
++ *rdevp = NULL;
++ synchronize_rcu();
++ if (atomic_read(&rdev->nr_pending)) {
++ /* lost the race, try later */
++ err = -EBUSY;
++ *rdevp = rdev;
++ } else if (p->replacement) {
++ /* We must have just cleared 'rdev' */
++ p->rdev = p->replacement;
++ clear_bit(Replacement, &p->replacement->flags);
++ smp_mb(); /* Make sure other CPUs may see both as identical
++ * but will never see neither - if they are careful
++ */
++ p->replacement = NULL;
++ clear_bit(WantReplacement, &rdev->flags);
++ } else
++ /* We might have just removed the Replacement as faulty-
++ * clear the bit just in case
++ */
++ clear_bit(WantReplacement, &rdev->flags);
++abort:
++
++ print_raid5_conf(conf);
++ return err;
++}
++
++static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
++{
++ struct r5conf *conf = mddev->private;
++ int err = -EEXIST;
++ int disk;
++ struct disk_info *p;
++ int first = 0;
++ int last = conf->raid_disks - 1;
++
++ if (mddev->recovery_disabled == conf->recovery_disabled)
++ return -EBUSY;
++
++ if (rdev->saved_raid_disk < 0 && has_failed(conf))
++ /* no point adding a device */
++ return -EINVAL;
++
++ if (rdev->raid_disk >= 0)
++ first = last = rdev->raid_disk;
++
++ /*
++ * find the disk ... but prefer rdev->saved_raid_disk
++ * if possible.
++ */
++ if (rdev->saved_raid_disk >= 0 &&
++ rdev->saved_raid_disk >= first &&
++ conf->disks[rdev->saved_raid_disk].rdev == NULL)
++ first = rdev->saved_raid_disk;
++
++ for (disk = first; disk <= last; disk++) {
++ p = conf->disks + disk;
++ if (p->rdev == NULL) {
++ clear_bit(In_sync, &rdev->flags);
++ rdev->raid_disk = disk;
++ err = 0;
++ if (rdev->saved_raid_disk != disk)
++ conf->fullsync = 1;
++ rcu_assign_pointer(p->rdev, rdev);
++ goto out;
++ }
++ }
++ for (disk = first; disk <= last; disk++) {
++ p = conf->disks + disk;
++ if (test_bit(WantReplacement, &p->rdev->flags) &&
++ p->replacement == NULL) {
++ clear_bit(In_sync, &rdev->flags);
++ set_bit(Replacement, &rdev->flags);
++ rdev->raid_disk = disk;
++ err = 0;
++ conf->fullsync = 1;
++ rcu_assign_pointer(p->replacement, rdev);
++ break;
++ }
++ }
++out:
++ print_raid5_conf(conf);
++ return err;
++}
++
++static int raid5_resize(struct mddev *mddev, sector_t sectors)
++{
++ /* no resync is happening, and there is enough space
++ * on all devices, so we can resize.
++ * We need to make sure resync covers any new space.
++ * If the array is shrinking we should possibly wait until
++ * any io in the removed space completes, but it hardly seems
++ * worth it.
++ */
++ sector_t newsize;
++ sectors &= ~((sector_t)mddev->chunk_sectors - 1);
++ newsize = raid5_size(mddev, sectors, mddev->raid_disks);
++ if (mddev->external_size &&
++ mddev->array_sectors > newsize)
++ return -EINVAL;
++ if (mddev->bitmap) {
++ int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
++ if (ret)
++ return ret;
++ }
++ md_set_array_sectors(mddev, newsize);
++ set_capacity(mddev->gendisk, mddev->array_sectors);
++ revalidate_disk(mddev->gendisk);
++ if (sectors > mddev->dev_sectors &&
++ mddev->recovery_cp > mddev->dev_sectors) {
++ mddev->recovery_cp = mddev->dev_sectors;
++ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
++ }
++ mddev->dev_sectors = sectors;
++ mddev->resync_max_sectors = sectors;
++ return 0;
++}
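Illustrative sketch, not part of the patch: the masking at the top of raid5_resize() rounds the requested size down to a whole number of chunks, which works because the check_reshape paths keep chunk_sectors a power of two. With a made-up 64K chunk:

    sector_t sectors = 1000000;                 /* requested size in 512-byte sectors */
    unsigned int chunk_sectors = 128;           /* 64K chunks */
    sectors &= ~((sector_t)chunk_sectors - 1);  /* 1000000 & ~127 == 999936 */
    /* the trailing partial chunk is simply dropped */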
++
++static int check_stripe_cache(struct mddev *mddev)
++{
++ /* Can only proceed if there are plenty of stripe_heads.
++	 * We need a minimum of one full stripe, and for sensible progress
++ * it is best to have about 4 times that.
++ * If we require 4 times, then the default 256 4K stripe_heads will
++ * allow for chunk sizes up to 256K, which is probably OK.
++ * If the chunk size is greater, user-space should request more
++ * stripe_heads first.
++ */
++ struct r5conf *conf = mddev->private;
++ if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
++ > conf->min_nr_stripes ||
++ ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
++ > conf->min_nr_stripes) {
++ printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
++ mdname(mddev),
++ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
++ / STRIPE_SIZE)*4);
++ return 0;
++ }
++ return 1;
++}
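Worked arithmetic for the rule in check_stripe_cache() above, assuming 4K pages so STRIPE_SIZE is 4096 (a sketch, not lines from the patch):

    int chunk_sectors = 512;                           /* a 256K chunk */
    int needed = ((chunk_sectors << 9) / 4096) * 4;    /* 64 * 4 == 256 */
    /* 256 matches the default stripe cache, so chunks larger than 256K
     * need user space to raise min_nr_stripes first. */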
++
++static int check_reshape(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++
++ if (mddev->delta_disks == 0 &&
++ mddev->new_layout == mddev->layout &&
++ mddev->new_chunk_sectors == mddev->chunk_sectors)
++ return 0; /* nothing to do */
++ if (has_failed(conf))
++ return -EINVAL;
++ if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
++ /* We might be able to shrink, but the devices must
++ * be made bigger first.
++ * For raid6, 4 is the minimum size.
++ * Otherwise 2 is the minimum
++ */
++ int min = 2;
++ if (mddev->level == 6)
++ min = 4;
++ if (mddev->raid_disks + mddev->delta_disks < min)
++ return -EINVAL;
++ }
++
++ if (!check_stripe_cache(mddev))
++ return -ENOSPC;
++
++ if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
++ mddev->delta_disks > 0)
++ if (resize_chunks(conf,
++ conf->previous_raid_disks
++ + max(0, mddev->delta_disks),
++ max(mddev->new_chunk_sectors,
++ mddev->chunk_sectors)
++ ) < 0)
++ return -ENOMEM;
++ return resize_stripes(conf, (conf->previous_raid_disks
++ + mddev->delta_disks));
++}
++
++static int raid5_start_reshape(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++ struct md_rdev *rdev;
++ int spares = 0;
++ unsigned long flags;
++
++ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
++ return -EBUSY;
++
++ if (!check_stripe_cache(mddev))
++ return -ENOSPC;
++
++ if (has_failed(conf))
++ return -EINVAL;
++
++ rdev_for_each(rdev, mddev) {
++ if (!test_bit(In_sync, &rdev->flags)
++ && !test_bit(Faulty, &rdev->flags))
++ spares++;
++ }
++
++ if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
++ /* Not enough devices even to make a degraded array
++ * of that size
++ */
++ return -EINVAL;
++
++ /* Refuse to reduce size of the array. Any reductions in
++ * array size must be through explicit setting of array_size
++ * attribute.
++ */
++ if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
++ < mddev->array_sectors) {
++ printk(KERN_ERR "md/raid:%s: array size must be reduced "
++ "before number of disks\n", mdname(mddev));
++ return -EINVAL;
++ }
++
++ atomic_set(&conf->reshape_stripes, 0);
++ spin_lock_irq(&conf->device_lock);
++ write_seqcount_begin(&conf->gen_lock);
++ conf->previous_raid_disks = conf->raid_disks;
++ conf->raid_disks += mddev->delta_disks;
++ conf->prev_chunk_sectors = conf->chunk_sectors;
++ conf->chunk_sectors = mddev->new_chunk_sectors;
++ conf->prev_algo = conf->algorithm;
++ conf->algorithm = mddev->new_layout;
++ conf->generation++;
++ /* Code that selects data_offset needs to see the generation update
++	 * if reshape_progress has been set - so a memory barrier is needed.
++ */
++ smp_mb();
++ if (mddev->reshape_backwards)
++ conf->reshape_progress = raid5_size(mddev, 0, 0);
++ else
++ conf->reshape_progress = 0;
++ conf->reshape_safe = conf->reshape_progress;
++ write_seqcount_end(&conf->gen_lock);
++ spin_unlock_irq(&conf->device_lock);
++
++ /* Now make sure any requests that proceeded on the assumption
++ * the reshape wasn't running - like Discard or Read - have
++ * completed.
++ */
++ mddev_suspend(mddev);
++ mddev_resume(mddev);
++
++ /* Add some new drives, as many as will fit.
++ * We know there are enough to make the newly sized array work.
++ * Don't add devices if we are reducing the number of
++ * devices in the array. This is because it is not possible
++ * to correctly record the "partially reconstructed" state of
++ * such devices during the reshape and confusion could result.
++ */
++ if (mddev->delta_disks >= 0) {
++ rdev_for_each(rdev, mddev)
++ if (rdev->raid_disk < 0 &&
++ !test_bit(Faulty, &rdev->flags)) {
++ if (raid5_add_disk(mddev, rdev) == 0) {
++ if (rdev->raid_disk
++ >= conf->previous_raid_disks)
++ set_bit(In_sync, &rdev->flags);
++ else
++ rdev->recovery_offset = 0;
++
++ if (sysfs_link_rdev(mddev, rdev))
++ /* Failure here is OK */;
++ }
++ } else if (rdev->raid_disk >= conf->previous_raid_disks
++ && !test_bit(Faulty, &rdev->flags)) {
++ /* This is a spare that was manually added */
++ set_bit(In_sync, &rdev->flags);
++ }
++
++ /* When a reshape changes the number of devices,
++ * ->degraded is measured against the larger of the
++ * pre and post number of devices.
++ */
++ spin_lock_irqsave(&conf->device_lock, flags);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ }
++ mddev->raid_disks = conf->raid_disks;
++ mddev->reshape_position = conf->reshape_progress;
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++
++ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
++ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
++ clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
++ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
++ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
++ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
++ "reshape");
++ if (!mddev->sync_thread) {
++ mddev->recovery = 0;
++ spin_lock_irq(&conf->device_lock);
++ write_seqcount_begin(&conf->gen_lock);
++ mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
++ mddev->new_chunk_sectors =
++ conf->chunk_sectors = conf->prev_chunk_sectors;
++ mddev->new_layout = conf->algorithm = conf->prev_algo;
++ rdev_for_each(rdev, mddev)
++ rdev->new_data_offset = rdev->data_offset;
++ smp_wmb();
++ conf->generation --;
++ conf->reshape_progress = MaxSector;
++ mddev->reshape_position = MaxSector;
++ write_seqcount_end(&conf->gen_lock);
++ spin_unlock_irq(&conf->device_lock);
++ return -EAGAIN;
++ }
++ conf->reshape_checkpoint = jiffies;
++ md_wakeup_thread(mddev->sync_thread);
++ md_new_event(mddev);
++ return 0;
++}
++
++/* This is called from the reshape thread and should make any
++ * changes needed in 'conf'
++ */
++static void end_reshape(struct r5conf *conf)
++{
++
++ if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
++ struct md_rdev *rdev;
++
++ spin_lock_irq(&conf->device_lock);
++ conf->previous_raid_disks = conf->raid_disks;
++ rdev_for_each(rdev, conf->mddev)
++ rdev->data_offset = rdev->new_data_offset;
++ smp_wmb();
++ conf->reshape_progress = MaxSector;
++ spin_unlock_irq(&conf->device_lock);
++ wake_up(&conf->wait_for_overlap);
++
++ /* read-ahead size must cover two whole stripes, which is
++		 * 2 * (datadisks) * chunksize, where datadisks is the number of data devices
++ */
++ if (conf->mddev->queue) {
++ int data_disks = conf->raid_disks - conf->max_degraded;
++ int stripe = data_disks * ((conf->chunk_sectors << 9)
++ / PAGE_SIZE);
++ if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
++ conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
++ }
++ }
++}
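A worked instance of the read-ahead sizing in end_reshape() above, again assuming 4K pages and a made-up 6-drive raid5 with 512K chunks (sketch only, not patch content):

    int data_disks = 5;                                       /* 6 drives minus 1 parity */
    int chunk_sectors = 1024;                                 /* 512K chunks */
    int stripe = data_disks * ((chunk_sectors << 9) / 4096);  /* 5 * 128 == 640 pages */
    /* ra_pages is raised to at least 2 * 640 == 1280 pages, i.e. 5 MiB */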
++
++/* This is called from the raid5d thread with mddev_lock held.
++ * It makes config changes to the device.
++ */
++static void raid5_finish_reshape(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++
++ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
++
++ if (mddev->delta_disks > 0) {
++ md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
++ set_capacity(mddev->gendisk, mddev->array_sectors);
++ revalidate_disk(mddev->gendisk);
++ } else {
++ int d;
++ spin_lock_irq(&conf->device_lock);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irq(&conf->device_lock);
++ for (d = conf->raid_disks ;
++ d < conf->raid_disks - mddev->delta_disks;
++ d++) {
++ struct md_rdev *rdev = conf->disks[d].rdev;
++ if (rdev)
++ clear_bit(In_sync, &rdev->flags);
++ rdev = conf->disks[d].replacement;
++ if (rdev)
++ clear_bit(In_sync, &rdev->flags);
++ }
++ }
++ mddev->layout = conf->algorithm;
++ mddev->chunk_sectors = conf->chunk_sectors;
++ mddev->reshape_position = MaxSector;
++ mddev->delta_disks = 0;
++ mddev->reshape_backwards = 0;
++ }
++}
++
++static void raid5_quiesce(struct mddev *mddev, int state)
++{
++ struct r5conf *conf = mddev->private;
++
++ switch(state) {
++ case 2: /* resume for a suspend */
++ wake_up(&conf->wait_for_overlap);
++ break;
++
++ case 1: /* stop all writes */
++ lock_all_device_hash_locks_irq(conf);
++ /* '2' tells resync/reshape to pause so that all
++ * active stripes can drain
++ */
++ conf->quiesce = 2;
++ wait_event_cmd(conf->wait_for_stripe,
++ atomic_read(&conf->active_stripes) == 0 &&
++ atomic_read(&conf->active_aligned_reads) == 0,
++ unlock_all_device_hash_locks_irq(conf),
++ lock_all_device_hash_locks_irq(conf));
++ conf->quiesce = 1;
++ unlock_all_device_hash_locks_irq(conf);
++ /* allow reshape to continue */
++ wake_up(&conf->wait_for_overlap);
++ break;
++
++ case 0: /* re-enable writes */
++ lock_all_device_hash_locks_irq(conf);
++ conf->quiesce = 0;
++ wake_up(&conf->wait_for_stripe);
++ wake_up(&conf->wait_for_overlap);
++ unlock_all_device_hash_locks_irq(conf);
++ break;
++ }
++}
++
++static void *raid45_takeover_raid0(struct mddev *mddev, int level)
++{
++ struct r0conf *raid0_conf = mddev->private;
++ sector_t sectors;
++
++ /* for raid0 takeover only one zone is supported */
++ if (raid0_conf->nr_strip_zones > 1) {
++ printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
++ mdname(mddev));
++ return ERR_PTR(-EINVAL);
++ }
++
++ sectors = raid0_conf->strip_zone[0].zone_end;
++ sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
++ mddev->dev_sectors = sectors;
++ mddev->new_level = level;
++ mddev->new_layout = ALGORITHM_PARITY_N;
++ mddev->new_chunk_sectors = mddev->chunk_sectors;
++ mddev->raid_disks += 1;
++ mddev->delta_disks = 1;
++ /* make sure it will be not marked as dirty */
++ mddev->recovery_cp = MaxSector;
++
++ return setup_conf(mddev);
++}
++
++static void *raid5_takeover_raid1(struct mddev *mddev)
++{
++ int chunksect;
++
++ if (mddev->raid_disks != 2 ||
++ mddev->degraded > 1)
++ return ERR_PTR(-EINVAL);
++
++ /* Should check if there are write-behind devices? */
++
++ chunksect = 64*2; /* 64K by default */
++
++ /* The array must be an exact multiple of chunksize */
++ while (chunksect && (mddev->array_sectors & (chunksect-1)))
++ chunksect >>= 1;
++
++ if ((chunksect<<9) < STRIPE_SIZE)
++ /* array size does not allow a suitable chunk size */
++ return ERR_PTR(-EINVAL);
++
++ mddev->new_level = 5;
++ mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
++ mddev->new_chunk_sectors = chunksect;
++
++ return setup_conf(mddev);
++}
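How the halving loop in raid5_takeover_raid1() settles on a chunk size, shown for a hypothetical 192-sector raid1 array (sketch only, not lines from the patch):

    sector_t array_sectors = 192;
    int chunksect = 64 * 2;                    /* start at 64K, i.e. 128 sectors */
    while (chunksect && (array_sectors & (chunksect - 1)))
        chunksect >>= 1;                       /* 192 is not a multiple of 128, so 128 -> 64 */
    /* 64 sectors == 32K, which is >= STRIPE_SIZE on 4K pages, so the takeover is accepted */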
++
++static void *raid5_takeover_raid6(struct mddev *mddev)
++{
++ int new_layout;
++
++ switch (mddev->layout) {
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ new_layout = ALGORITHM_LEFT_ASYMMETRIC;
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ new_layout = ALGORITHM_LEFT_SYMMETRIC;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ new_layout = ALGORITHM_RIGHT_SYMMETRIC;
++ break;
++ case ALGORITHM_PARITY_0_6:
++ new_layout = ALGORITHM_PARITY_0;
++ break;
++ case ALGORITHM_PARITY_N:
++ new_layout = ALGORITHM_PARITY_N;
++ break;
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++ mddev->new_level = 5;
++ mddev->new_layout = new_layout;
++ mddev->delta_disks = -1;
++ mddev->raid_disks -= 1;
++ return setup_conf(mddev);
++}
++
++static int raid5_check_reshape(struct mddev *mddev)
++{
++ /* For a 2-drive array, the layout and chunk size can be changed
++	 * immediately as no restriping is needed.
++ * For larger arrays we record the new value - after validation
++ * to be used by a reshape pass.
++ */
++ struct r5conf *conf = mddev->private;
++ int new_chunk = mddev->new_chunk_sectors;
++
++ if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
++ return -EINVAL;
++ if (new_chunk > 0) {
++ if (!is_power_of_2(new_chunk))
++ return -EINVAL;
++ if (new_chunk < (PAGE_SIZE>>9))
++ return -EINVAL;
++ if (mddev->array_sectors & (new_chunk-1))
++ /* not factor of array size */
++ return -EINVAL;
++ }
++
++ /* They look valid */
++
++ if (mddev->raid_disks == 2) {
++ /* can make the change immediately */
++ if (mddev->new_layout >= 0) {
++ conf->algorithm = mddev->new_layout;
++ mddev->layout = mddev->new_layout;
++ }
++ if (new_chunk > 0) {
++ conf->chunk_sectors = new_chunk ;
++ mddev->chunk_sectors = new_chunk;
++ }
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ md_wakeup_thread(mddev->thread);
++ }
++ return check_reshape(mddev);
++}
++
++static int raid6_check_reshape(struct mddev *mddev)
++{
++ int new_chunk = mddev->new_chunk_sectors;
++
++ if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
++ return -EINVAL;
++ if (new_chunk > 0) {
++ if (!is_power_of_2(new_chunk))
++ return -EINVAL;
++ if (new_chunk < (PAGE_SIZE >> 9))
++ return -EINVAL;
++ if (mddev->array_sectors & (new_chunk-1))
++ /* not factor of array size */
++ return -EINVAL;
++ }
++
++ /* They look valid */
++ return check_reshape(mddev);
++}
++
++static void *raid5_takeover(struct mddev *mddev)
++{
++ /* raid5 can take over:
++ * raid0 - if there is only one strip zone - make it a raid4 layout
++ * raid1 - if there are two drives. We need to know the chunk size
++ * raid4 - trivial - just use a raid4 layout.
++ * raid6 - Providing it is a *_6 layout
++ */
++ if (mddev->level == 0)
++ return raid45_takeover_raid0(mddev, 5);
++ if (mddev->level == 1)
++ return raid5_takeover_raid1(mddev);
++ if (mddev->level == 4) {
++ mddev->new_layout = ALGORITHM_PARITY_N;
++ mddev->new_level = 5;
++ return setup_conf(mddev);
++ }
++ if (mddev->level == 6)
++ return raid5_takeover_raid6(mddev);
++
++ return ERR_PTR(-EINVAL);
++}
++
++static void *raid4_takeover(struct mddev *mddev)
++{
++ /* raid4 can take over:
++ * raid0 - if there is only one strip zone
++ * raid5 - if layout is right
++ */
++ if (mddev->level == 0)
++ return raid45_takeover_raid0(mddev, 4);
++ if (mddev->level == 5 &&
++ mddev->layout == ALGORITHM_PARITY_N) {
++ mddev->new_layout = 0;
++ mddev->new_level = 4;
++ return setup_conf(mddev);
++ }
++ return ERR_PTR(-EINVAL);
++}
++
++static struct md_personality raid5_personality;
++
++static void *raid6_takeover(struct mddev *mddev)
++{
++ /* Currently can only take over a raid5. We map the
++ * personality to an equivalent raid6 personality
++ * with the Q block at the end.
++ */
++ int new_layout;
++
++ if (mddev->pers != &raid5_personality)
++ return ERR_PTR(-EINVAL);
++ if (mddev->degraded > 1)
++ return ERR_PTR(-EINVAL);
++ if (mddev->raid_disks > 253)
++ return ERR_PTR(-EINVAL);
++ if (mddev->raid_disks < 3)
++ return ERR_PTR(-EINVAL);
++
++ switch (mddev->layout) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
++ break;
++ case ALGORITHM_PARITY_0:
++ new_layout = ALGORITHM_PARITY_0_6;
++ break;
++ case ALGORITHM_PARITY_N:
++ new_layout = ALGORITHM_PARITY_N;
++ break;
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++ mddev->new_level = 6;
++ mddev->new_layout = new_layout;
++ mddev->delta_disks = 1;
++ mddev->raid_disks += 1;
++ return setup_conf(mddev);
++}
++
++static struct md_personality raid6_personality =
++{
++ .name = "raid6",
++ .level = 6,
++ .owner = THIS_MODULE,
++ .make_request = make_request,
++ .run = run,
++ .free = raid5_free,
++ .status = status,
++ .error_handler = error,
++ .hot_add_disk = raid5_add_disk,
++ .hot_remove_disk= raid5_remove_disk,
++ .spare_active = raid5_spare_active,
++ .sync_request = sync_request,
++ .resize = raid5_resize,
++ .size = raid5_size,
++ .check_reshape = raid6_check_reshape,
++ .start_reshape = raid5_start_reshape,
++ .finish_reshape = raid5_finish_reshape,
++ .quiesce = raid5_quiesce,
++ .takeover = raid6_takeover,
++ .congested = raid5_congested,
++ .mergeable_bvec = raid5_mergeable_bvec,
++};
++static struct md_personality raid5_personality =
++{
++ .name = "raid5",
++ .level = 5,
++ .owner = THIS_MODULE,
++ .make_request = make_request,
++ .run = run,
++ .free = raid5_free,
++ .status = status,
++ .error_handler = error,
++ .hot_add_disk = raid5_add_disk,
++ .hot_remove_disk= raid5_remove_disk,
++ .spare_active = raid5_spare_active,
++ .sync_request = sync_request,
++ .resize = raid5_resize,
++ .size = raid5_size,
++ .check_reshape = raid5_check_reshape,
++ .start_reshape = raid5_start_reshape,
++ .finish_reshape = raid5_finish_reshape,
++ .quiesce = raid5_quiesce,
++ .takeover = raid5_takeover,
++ .congested = raid5_congested,
++ .mergeable_bvec = raid5_mergeable_bvec,
++};
++
++static struct md_personality raid4_personality =
++{
++ .name = "raid4",
++ .level = 4,
++ .owner = THIS_MODULE,
++ .make_request = make_request,
++ .run = run,
++ .free = raid5_free,
++ .status = status,
++ .error_handler = error,
++ .hot_add_disk = raid5_add_disk,
++ .hot_remove_disk= raid5_remove_disk,
++ .spare_active = raid5_spare_active,
++ .sync_request = sync_request,
++ .resize = raid5_resize,
++ .size = raid5_size,
++ .check_reshape = raid5_check_reshape,
++ .start_reshape = raid5_start_reshape,
++ .finish_reshape = raid5_finish_reshape,
++ .quiesce = raid5_quiesce,
++ .takeover = raid4_takeover,
++ .congested = raid5_congested,
++ .mergeable_bvec = raid5_mergeable_bvec,
++};
++
++static int __init raid5_init(void)
++{
++ raid5_wq = alloc_workqueue("raid5wq",
++ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
++ if (!raid5_wq)
++ return -ENOMEM;
++ register_md_personality(&raid6_personality);
++ register_md_personality(&raid5_personality);
++ register_md_personality(&raid4_personality);
++ return 0;
++}
++
++static void raid5_exit(void)
++{
++ unregister_md_personality(&raid6_personality);
++ unregister_md_personality(&raid5_personality);
++ unregister_md_personality(&raid4_personality);
++ destroy_workqueue(raid5_wq);
++}
++
++module_init(raid5_init);
++module_exit(raid5_exit);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
++MODULE_ALIAS("md-personality-4"); /* RAID5 */
++MODULE_ALIAS("md-raid5");
++MODULE_ALIAS("md-raid4");
++MODULE_ALIAS("md-level-5");
++MODULE_ALIAS("md-level-4");
++MODULE_ALIAS("md-personality-8"); /* RAID6 */
++MODULE_ALIAS("md-raid6");
++MODULE_ALIAS("md-level-6");
++
++/* This used to be two separate modules, they were: */
++MODULE_ALIAS("raid5");
++MODULE_ALIAS("raid6");
+diff -Nur linux-4.1.10.orig/drivers/md/raid5.h linux-4.1.10/drivers/md/raid5.h
+--- linux-4.1.10.orig/drivers/md/raid5.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/md/raid5.h 2015-10-07 18:00:08.000000000 +0200
+@@ -495,6 +495,7 @@
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
@@ -6407,9 +14286,621 @@ diff -Nur linux-4.1.6.orig/drivers/md/raid5.h linux-4.1.6/drivers/md/raid5.h
struct page *spare_page; /* Used when checking P/Q in raid6 */
struct flex_array *scribble; /* space for constructing buffer
* lists and performing address
-diff -Nur linux-4.1.6.orig/drivers/misc/hwlat_detector.c linux-4.1.6/drivers/misc/hwlat_detector.c
---- linux-4.1.6.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/drivers/misc/hwlat_detector.c 2015-09-08 23:49:06.082091803 +0200
+diff -Nur linux-4.1.10.orig/drivers/md/raid5.h.orig linux-4.1.10/drivers/md/raid5.h.orig
+--- linux-4.1.10.orig/drivers/md/raid5.h.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/drivers/md/raid5.h.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,608 @@
++#ifndef _RAID5_H
++#define _RAID5_H
++
++#include <linux/raid/xor.h>
++#include <linux/dmaengine.h>
++
++/*
++ *
++ * Each stripe contains one buffer per device. Each buffer can be in
++ * one of a number of states stored in "flags". Changes between
++ * these states happen *almost* exclusively under the protection of the
++ * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
++ * these are not protected by STRIPE_ACTIVE.
++ *
++ * The flag bits that are used to represent these states are:
++ * R5_UPTODATE and R5_LOCKED
++ *
++ * State Empty == !UPTODATE, !LOCK
++ * We have no data, and there is no active request
++ * State Want == !UPTODATE, LOCK
++ * A read request is being submitted for this block
++ * State Dirty == UPTODATE, LOCK
++ * Some new data is in this buffer, and it is being written out
++ * State Clean == UPTODATE, !LOCK
++ * We have valid data which is the same as on disc
++ *
++ * The possible state transitions are:
++ *
++ * Empty -> Want - on read or write to get old data for parity calc
++ * Empty -> Dirty - on compute_parity to satisfy write/sync request.
++ * Empty -> Clean - on compute_block when computing a block for failed drive
++ * Want -> Empty - on failed read
++ * Want -> Clean - on successful completion of read request
++ * Dirty -> Clean - on successful completion of write request
++ * Dirty -> Clean - on failed write
++ * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
++ *
++ * The Want->Empty, Want->Clean, Dirty->Clean transitions
++ * all happen in b_end_io at interrupt time.
++ * Each sets the Uptodate bit before releasing the Lock bit.
++ * This leaves one multi-stage transition:
++ * Want->Dirty->Clean
++ * This is safe because thinking that a Clean buffer is actually dirty
++ * will at worst delay some action, and the stripe will be scheduled
++ * for attention after the transition is complete.
++ *
++ * There is one possibility that is not covered by these states. That
++ * is if one drive has failed and there is a spare being rebuilt. We
++ * can't distinguish between a clean block that has been generated
++ * from parity calculations, and a clean block that has been
++ * successfully written to the spare ( or to parity when resyncing).
++ * To distinguish these states we have a stripe bit STRIPE_INSYNC that
++ * is set whenever a write is scheduled to the spare, or to the parity
++ * disc if there is no spare. A sync request clears this bit, and
++ * when we find it set with no buffers locked, we know the sync is
++ * complete.
++ *
++ * Buffers for the md device that arrive via make_request are attached
++ * to the appropriate stripe in one of two lists linked on b_reqnext.
++ * One list (bh_read) for read requests, one (bh_write) for write.
++ * There should never be more than one buffer on the two lists
++ * together, but we are not guaranteed of that so we allow for more.
++ *
++ * If a buffer is on the read list when the associated cache buffer is
++ * Uptodate, the data is copied into the read buffer and its b_end_io
++ * routine is called. This may happen in the end_request routine only
++ * if the buffer has just successfully been read. end_request should
++ * remove the buffers from the list and then set the Uptodate bit on
++ * the buffer. Other threads may do this only if they first check
++ * that the Uptodate bit is set. Once they have checked that they may
++ * take buffers off the read queue.
++ *
++ * When a buffer on the write list is committed for write it is copied
++ * into the cache buffer, which is then marked dirty, and moved onto a
++ * third list, the written list (bh_written). Once both the parity
++ * block and the cached buffer are successfully written, any buffer on
++ * a written list can be returned with b_end_io.
++ *
++ * The write list and read list both act as fifos. The read list,
++ * write list and written list are protected by the device_lock.
++ * The device_lock is only for list manipulations and will only be
++ * held for a very short time. It can be claimed from interrupts.
++ *
++ *
++ * Stripes in the stripe cache can be on one of two lists (or on
++ * neither). The "inactive_list" contains stripes which are not
++ * currently being used for any request. They can freely be reused
++ * for another stripe. The "handle_list" contains stripes that need
++ * to be handled in some way. Both of these are fifo queues. Each
++ * stripe is also (potentially) linked to a hash bucket in the hash
++ * table so that it can be found by sector number. Stripes that are
++ * not hashed must be on the inactive_list, and will normally be at
++ * the front. All stripes start life this way.
++ *
++ * The inactive_list, handle_list and hash bucket lists are all protected by the
++ * device_lock.
++ * - stripes have a reference counter. If count==0, they are on a list.
++ * - If a stripe might need handling, STRIPE_HANDLE is set.
++ * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
++ * handle_list else inactive_list
++ *
++ * This, combined with the fact that STRIPE_HANDLE is only ever
++ * cleared while a stripe has a non-zero count means that if the
++ * refcount is 0 and STRIPE_HANDLE is set, then it is on the
++ * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
++ * the stripe is on inactive_list.
++ *
++ * The possible transitions are:
++ * activate an unhashed/inactive stripe (get_active_stripe())
++ * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
++ * activate a hashed, possibly active stripe (get_active_stripe())
++ * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
++ * attach a request to an active stripe (add_stripe_bh())
++ * lockdev attach-buffer unlockdev
++ * handle a stripe (handle_stripe())
++ * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
++ * (lockdev check-buffers unlockdev) ..
++ * change-state ..
++ * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
++ * release an active stripe (release_stripe())
++ * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
++ *
++ * The refcount counts each thread that has activated the stripe,
++ * plus raid5d if it is handling it, plus one for each active request
++ * on a cached buffer, and plus one if the stripe is undergoing stripe
++ * operations.
++ *
++ * The stripe operations are:
++ * -copying data between the stripe cache and user application buffers
++ * -computing blocks to save a disk access, or to recover a missing block
++ * -updating the parity on a write operation (reconstruct write and
++ * read-modify-write)
++ * -checking parity correctness
++ * -running i/o to disk
++ * These operations are carried out by raid5_run_ops which uses the async_tx
++ * api to (optionally) offload operations to dedicated hardware engines.
++ * When requesting an operation handle_stripe sets the pending bit for the
++ * operation and increments the count. raid5_run_ops is then run whenever
++ * the count is non-zero.
++ * There are some critical dependencies between the operations that prevent some
++ * from being requested while another is in flight.
++ * 1/ Parity check operations destroy the in cache version of the parity block,
++ * so we prevent parity dependent operations like writes and compute_blocks
++ * from starting while a check is in progress. Some dma engines can perform
++ * the check without damaging the parity block, in these cases the parity
++ * block is re-marked up to date (assuming the check was successful) and is
++ * not re-read from disk.
++ * 2/ When a write operation is requested we immediately lock the affected
++ * blocks, and mark them as not up to date. This causes new read requests
++ * to be held off, as well as parity checks and compute block operations.
++ * 3/ Once a compute block operation has been requested handle_stripe treats
++ * that block as if it is up to date. raid5_run_ops guarantees that any
++ * operation that is dependent on the compute block result is initiated after
++ * the compute block completes.
++ */
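A compact restatement of the buffer-state encoding described in the comment above (editorial summary, not lines from the patch):

    /*
     *  state    R5_UPTODATE  R5_LOCKED
     *  Empty         0           0      no data, no active request
     *  Want          0           1      read submitted for this block
     *  Dirty         1           1      new data being written out
     *  Clean         1           0      data matches what is on disc
     */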
++
++/*
++ * Operations state - intermediate states that are visible outside of
++ * STRIPE_ACTIVE.
++ * In general _idle indicates nothing is running, _run indicates a data
++ * processing operation is active, and _result means the data processing result
++ * is stable and can be acted upon. For simple operations like biofill and
++ * compute that only have an _idle and _run state they are indicated with
++ * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
++ */
++/**
++ * enum check_states - handles syncing / repairing a stripe
++ * @check_state_idle - check operations are quiesced
++ * @check_state_run - check operation is running
++ * @check_state_result - set outside lock when check result is valid
++ * @check_state_compute_run - check failed and we are repairing
++ * @check_state_compute_result - set outside lock when compute result is valid
++ */
++enum check_states {
++ check_state_idle = 0,
++ check_state_run, /* xor parity check */
++ check_state_run_q, /* q-parity check */
++ check_state_run_pq, /* pq dual parity check */
++ check_state_check_result,
++ check_state_compute_run, /* parity repair */
++ check_state_compute_result,
++};
++
++/**
++ * enum reconstruct_states - handles writing or expanding a stripe
++ */
++enum reconstruct_states {
++ reconstruct_state_idle = 0,
++ reconstruct_state_prexor_drain_run, /* prexor-write */
++ reconstruct_state_drain_run, /* write */
++ reconstruct_state_run, /* expand */
++ reconstruct_state_prexor_drain_result,
++ reconstruct_state_drain_result,
++ reconstruct_state_result,
++};
++
++struct stripe_head {
++ struct hlist_node hash;
++ struct list_head lru; /* inactive_list or handle_list */
++ struct llist_node release_list;
++ struct r5conf *raid_conf;
++ short generation; /* increments with every
++ * reshape */
++ sector_t sector; /* sector of this row */
++ short pd_idx; /* parity disk index */
++ short qd_idx; /* 'Q' disk index for raid6 */
++ short ddf_layout;/* use DDF ordering to calculate Q */
++ short hash_lock_index;
++ unsigned long state; /* state flags */
++ atomic_t count; /* nr of active thread/requests */
++ int bm_seq; /* sequence number for bitmap flushes */
++ int disks; /* disks in stripe */
++ int overwrite_disks; /* total overwrite disks in stripe,
++ * this is only checked when stripe
++ * has STRIPE_BATCH_READY
++ */
++ enum check_states check_state;
++ enum reconstruct_states reconstruct_state;
++ spinlock_t stripe_lock;
++ int cpu;
++ struct r5worker_group *group;
++
++ struct stripe_head *batch_head; /* protected by stripe lock */
++ spinlock_t batch_lock; /* only header's lock is useful */
++ struct list_head batch_list; /* protected by head's batch lock*/
++ /**
++ * struct stripe_operations
++ * @target - STRIPE_OP_COMPUTE_BLK target
++ * @target2 - 2nd compute target in the raid6 case
++ * @zero_sum_result - P and Q verification flags
++ * @request - async service request flags for raid_run_ops
++ */
++ struct stripe_operations {
++ int target, target2;
++ enum sum_check_flags zero_sum_result;
++ } ops;
++ struct r5dev {
++ /* rreq and rvec are used for the replacement device when
++ * writing data to both devices.
++ */
++ struct bio req, rreq;
++ struct bio_vec vec, rvec;
++ struct page *page, *orig_page;
++ struct bio *toread, *read, *towrite, *written;
++ sector_t sector; /* sector of this page */
++ unsigned long flags;
++ } dev[1]; /* allocated with extra space depending of RAID geometry */
++};
++
++/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
++ * for handle_stripe.
++ */
++struct stripe_head_state {
++ /* 'syncing' means that we need to read all devices, either
++ * to check/correct parity, or to reconstruct a missing device.
++ * 'replacing' means we are replacing one or more drives and
++ * the source is valid at this point so we don't need to
++ * read all devices, just the replacement targets.
++ */
++ int syncing, expanding, expanded, replacing;
++ int locked, uptodate, to_read, to_write, failed, written;
++ int to_fill, compute, req_compute, non_overwrite;
++ int failed_num[2];
++ int p_failed, q_failed;
++ int dec_preread_active;
++ unsigned long ops_request;
++
++ struct bio *return_bi;
++ struct md_rdev *blocked_rdev;
++ int handle_bad_blocks;
++};
++
++/* Flags for struct r5dev.flags */
++enum r5dev_flags {
++ R5_UPTODATE, /* page contains current data */
++ R5_LOCKED, /* IO has been submitted on "req" */
++ R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
++ R5_OVERWRITE, /* towrite covers whole page */
++/* and some that are internal to handle_stripe */
++ R5_Insync, /* rdev && rdev->in_sync at start */
++ R5_Wantread, /* want to schedule a read */
++ R5_Wantwrite,
++ R5_Overlap, /* There is a pending overlapping request
++ * on this block */
++ R5_ReadNoMerge, /* prevent bio from merging in block-layer */
++ R5_ReadError, /* seen a read error here recently */
++ R5_ReWrite, /* have tried to over-write the readerror */
++
++ R5_Expanded, /* This block now has post-expand data */
++ R5_Wantcompute, /* compute_block in progress treat as
++ * uptodate
++ */
++ R5_Wantfill, /* dev->toread contains a bio that needs
++ * filling
++ */
++ R5_Wantdrain, /* dev->towrite needs to be drained */
++ R5_WantFUA, /* Write should be FUA */
++ R5_SyncIO, /* The IO is sync */
++ R5_WriteError, /* got a write error - need to record it */
++ R5_MadeGood, /* A bad block has been fixed by writing to it */
++ R5_ReadRepl, /* Will/did read from replacement rather than orig */
++ R5_MadeGoodRepl,/* A bad block on the replacement device has been
++ * fixed by writing to it */
++ R5_NeedReplace, /* This device has a replacement which is not
++ * up-to-date at this stripe. */
++ R5_WantReplace, /* We need to update the replacement, we have read
++ * data in, and now is a good time to write it out.
++ */
++ R5_Discard, /* Discard the stripe */
++ R5_SkipCopy, /* Don't copy data from bio to stripe cache */
++};
++
++/*
++ * Stripe state
++ */
++enum {
++ STRIPE_ACTIVE,
++ STRIPE_HANDLE,
++ STRIPE_SYNC_REQUESTED,
++ STRIPE_SYNCING,
++ STRIPE_INSYNC,
++ STRIPE_REPLACED,
++ STRIPE_PREREAD_ACTIVE,
++ STRIPE_DELAYED,
++ STRIPE_DEGRADED,
++ STRIPE_BIT_DELAY,
++ STRIPE_EXPANDING,
++ STRIPE_EXPAND_SOURCE,
++ STRIPE_EXPAND_READY,
++ STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */
++ STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */
++ STRIPE_BIOFILL_RUN,
++ STRIPE_COMPUTE_RUN,
++ STRIPE_OPS_REQ_PENDING,
++ STRIPE_ON_UNPLUG_LIST,
++ STRIPE_DISCARD,
++ STRIPE_ON_RELEASE_LIST,
++ STRIPE_BATCH_READY,
++ STRIPE_BATCH_ERR,
++ STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add
++ * to batch yet.
++ */
++};
++
++#define STRIPE_EXPAND_SYNC_FLAGS \
++ ((1 << STRIPE_EXPAND_SOURCE) |\
++ (1 << STRIPE_EXPAND_READY) |\
++ (1 << STRIPE_EXPANDING) |\
++ (1 << STRIPE_SYNC_REQUESTED))
++/*
++ * Operation request flags
++ */
++enum {
++ STRIPE_OP_BIOFILL,
++ STRIPE_OP_COMPUTE_BLK,
++ STRIPE_OP_PREXOR,
++ STRIPE_OP_BIODRAIN,
++ STRIPE_OP_RECONSTRUCT,
++ STRIPE_OP_CHECK,
++};
++
++/*
++ * RAID parity calculation preferences
++ */
++enum {
++ PARITY_DISABLE_RMW = 0,
++ PARITY_ENABLE_RMW,
++ PARITY_PREFER_RMW,
++};
++
++/*
++ * Pages requested from set_syndrome_sources()
++ */
++enum {
++ SYNDROME_SRC_ALL,
++ SYNDROME_SRC_WANT_DRAIN,
++ SYNDROME_SRC_WRITTEN,
++};
++/*
++ * Plugging:
++ *
++ * To improve write throughput, we need to delay the handling of some
++ * stripes until there has been a chance that several write requests
++ * for the one stripe have all been collected.
++ * In particular, any write request that would require pre-reading
++ * is put on a "delayed" queue until there are no stripes currently
++ * in a pre-read phase. Further, if the "delayed" queue is empty when
++ * a stripe is put on it then we "plug" the queue and do not process it
++ * until an unplug call is made. (the unplug_io_fn() is called).
++ *
++ * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
++ * it to the count of prereading stripes.
++ * When write is initiated, or the stripe refcnt == 0 (just in case) we
++ * clear the PREREAD_ACTIVE flag and decrement the count
++ * Whenever the 'handle' queue is empty and the device is not plugged, we
++ * move any stripes from delayed to handle and clear the DELAYED flag and set
++ * PREREAD_ACTIVE.
++ * In stripe_handle, if we find pre-reading is necessary, we do it if
++ * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
++ * HANDLE gets cleared if stripe_handle leaves nothing locked.
++ */
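The plugging rules above, condensed (editorial summary, not patch content):

    /*
     * pre-read needed, PREREAD_ACTIVE not set  -> set STRIPE_DELAYED, park on the delayed queue
     * handle queue empty, device not plugged   -> move delayed stripes to handle,
     *                                             clear DELAYED, set PREREAD_ACTIVE
     * write initiated, or stripe refcnt == 0   -> clear PREREAD_ACTIVE and drop the
     *                                             count of prereading stripes
     */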
++
++struct disk_info {
++ struct md_rdev *rdev, *replacement;
++};
++
++/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
++ * This is because we sometimes take all the spinlocks
++ * and creating that much locking depth can cause
++ * problems.
++ */
++#define NR_STRIPE_HASH_LOCKS 8
++#define STRIPE_HASH_LOCKS_MASK (NR_STRIPE_HASH_LOCKS - 1)
++
++struct r5worker {
++ struct work_struct work;
++ struct r5worker_group *group;
++ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
++ bool working;
++};
++
++struct r5worker_group {
++ struct list_head handle_list;
++ struct r5conf *conf;
++ struct r5worker *workers;
++ int stripes_cnt;
++};
++
++struct r5conf {
++ struct hlist_head *stripe_hashtbl;
++ /* only protect corresponding hash list and inactive_list */
++ spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
++ struct mddev *mddev;
++ int chunk_sectors;
++ int level, algorithm, rmw_level;
++ int max_degraded;
++ int raid_disks;
++ int max_nr_stripes;
++ int min_nr_stripes;
++
++ /* reshape_progress is the leading edge of a 'reshape'
++ * It has value MaxSector when no reshape is happening
++ * If delta_disks < 0, it is the last sector we started work on,
++	 * else it is the next sector to work on.
++ */
++ sector_t reshape_progress;
++ /* reshape_safe is the trailing edge of a reshape. We know that
++ * before (or after) this address, all reshape has completed.
++ */
++ sector_t reshape_safe;
++ int previous_raid_disks;
++ int prev_chunk_sectors;
++ int prev_algo;
++ short generation; /* increments with every reshape */
++ seqcount_t gen_lock; /* lock against generation changes */
++ unsigned long reshape_checkpoint; /* Time we last updated
++ * metadata */
++ long long min_offset_diff; /* minimum difference between
++ * data_offset and
++ * new_data_offset across all
++ * devices. May be negative,
++ * but is closest to zero.
++ */
++
++ struct list_head handle_list; /* stripes needing handling */
++ struct list_head hold_list; /* preread ready stripes */
++ struct list_head delayed_list; /* stripes that have plugged requests */
++ struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
++ struct bio *retry_read_aligned; /* currently retrying aligned bios */
++ struct bio *retry_read_aligned_list; /* aligned bios retry list */
++ atomic_t preread_active_stripes; /* stripes with scheduled io */
++ atomic_t active_aligned_reads;
++ atomic_t pending_full_writes; /* full write backlog */
++ int bypass_count; /* bypassed prereads */
++ int bypass_threshold; /* preread nice */
++ int skip_copy; /* Don't copy data from bio to stripe cache */
++ struct list_head *last_hold; /* detect hold_list promotions */
++
++ atomic_t reshape_stripes; /* stripes with pending writes for reshape */
++ /* unfortunately we need two cache names as we temporarily have
++ * two caches.
++ */
++ int active_name;
++ char cache_name[2][32];
++ struct kmem_cache *slab_cache; /* for allocating stripes */
++ struct mutex cache_size_mutex; /* Protect changes to cache size */
++
++ int seq_flush, seq_write;
++ int quiesce;
++
++ int fullsync; /* set to 1 if a full sync is needed,
++ * (fresh device added).
++ * Cleared when a sync completes.
++ */
++ int recovery_disabled;
++ /* per cpu variables */
++ struct raid5_percpu {
++ struct page *spare_page; /* Used when checking P/Q in raid6 */
++ struct flex_array *scribble; /* space for constructing buffer
++ * lists and performing address
++ * conversions
++ */
++ } __percpu *percpu;
++#ifdef CONFIG_HOTPLUG_CPU
++ struct notifier_block cpu_notify;
++#endif
++
++ /*
++ * Free stripes pool
++ */
++ atomic_t active_stripes;
++ struct list_head inactive_list[NR_STRIPE_HASH_LOCKS];
++ atomic_t empty_inactive_list_nr;
++ struct llist_head released_stripes;
++ wait_queue_head_t wait_for_stripe;
++ wait_queue_head_t wait_for_overlap;
++ unsigned long cache_state;
++#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
++ * waiting for 25% to be free
++ */
++#define R5_ALLOC_MORE 2 /* It might help to allocate another
++ * stripe.
++ */
++#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
++ * more until at least one has been
++ * released. This avoids flooding
++ * the cache.
++ */
++ struct shrinker shrinker;
++ int pool_size; /* number of disks in stripeheads in pool */
++ spinlock_t device_lock;
++ struct disk_info *disks;
++
++ /* When taking over an array from a different personality, we store
++ * the new thread here until we fully activate the array.
++ */
++ struct md_thread *thread;
++ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
++ struct r5worker_group *worker_groups;
++ int group_cnt;
++ int worker_cnt_per_group;
++};
++
++
++/*
++ * Our supported algorithms
++ */
++#define ALGORITHM_LEFT_ASYMMETRIC 0 /* Rotating Parity N with Data Restart */
++#define ALGORITHM_RIGHT_ASYMMETRIC 1 /* Rotating Parity 0 with Data Restart */
++#define ALGORITHM_LEFT_SYMMETRIC 2 /* Rotating Parity N with Data Continuation */
++#define ALGORITHM_RIGHT_SYMMETRIC 3 /* Rotating Parity 0 with Data Continuation */
++
++/* Define non-rotating (raid4) algorithms. These allow
++ * conversion of raid4 to raid5.
++ */
++#define ALGORITHM_PARITY_0 4 /* P or P,Q are initial devices */
++#define ALGORITHM_PARITY_N 5 /* P or P,Q are final devices. */
++
++/* DDF RAID6 layouts differ from md/raid6 layouts in two ways.
++ * Firstly, the exact positioning of the parity block is slightly
++ * different between the 'LEFT_*' modes of md and the "_N_*" modes
++ * of DDF.
++ * Secondly, the order of datablocks over which the Q syndrome is computed
++ * is different.
++ * Consequently we have different layouts for DDF/raid6 than md/raid6.
++ * These layouts are from the DDFv1.2 spec.
++ * Interestingly DDFv1.2-Errata-A does not specify N_CONTINUE but
++ * leaves RLQ=3 as 'Vendor Specific'
++ */
++
++#define ALGORITHM_ROTATING_ZERO_RESTART 8 /* DDF PRL=6 RLQ=1 */
++#define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */
++#define ALGORITHM_ROTATING_N_CONTINUE 10 /*DDF PRL=6 RLQ=3 */
++
++/* For every RAID5 algorithm we define a RAID6 algorithm
++ * with exactly the same layout for data and parity, and
++ * with the Q block always on the last device (N-1).
++ * This allows trivial conversion from RAID5 to RAID6
++ */
++#define ALGORITHM_LEFT_ASYMMETRIC_6 16
++#define ALGORITHM_RIGHT_ASYMMETRIC_6 17
++#define ALGORITHM_LEFT_SYMMETRIC_6 18
++#define ALGORITHM_RIGHT_SYMMETRIC_6 19
++#define ALGORITHM_PARITY_0_6 20
++#define ALGORITHM_PARITY_N_6 ALGORITHM_PARITY_N
++
++static inline int algorithm_valid_raid5(int layout)
++{
++ return (layout >= 0) &&
++ (layout <= 5);
++}
++static inline int algorithm_valid_raid6(int layout)
++{
++ return (layout >= 0 && layout <= 5)
++ ||
++ (layout >= 8 && layout <= 10)
++ ||
++ (layout >= 16 && layout <= 20);
++}
++
++static inline int algorithm_is_DDF(int layout)
++{
++ return layout >= 8 && layout <= 10;
++}
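Spot checks of the three helpers above against the layout constants defined earlier in this header (illustrative kernel-context fragment, not lines from the patch):

    /* ALGORITHM_LEFT_SYMMETRIC (2) is a valid raid5 and raid6 layout,
     * ALGORITHM_ROTATING_N_CONTINUE (10) is raid6-only and one of the DDF layouts,
     * ALGORITHM_LEFT_SYMMETRIC_6 (18) is raid6-only and not DDF.
     */
    BUG_ON(!algorithm_valid_raid5(ALGORITHM_LEFT_SYMMETRIC));
    BUG_ON(algorithm_valid_raid5(ALGORITHM_ROTATING_N_CONTINUE));
    BUG_ON(!algorithm_valid_raid6(ALGORITHM_ROTATING_N_CONTINUE));
    BUG_ON(!algorithm_is_DDF(ALGORITHM_ROTATING_N_CONTINUE));
    BUG_ON(algorithm_is_DDF(ALGORITHM_LEFT_SYMMETRIC_6));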
++
++extern void md_raid5_kick_device(struct r5conf *conf);
++extern int raid5_set_cache_size(struct mddev *mddev, int size);
++#endif
+diff -Nur linux-4.1.10.orig/drivers/misc/hwlat_detector.c linux-4.1.10/drivers/misc/hwlat_detector.c
+--- linux-4.1.10.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/drivers/misc/hwlat_detector.c 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,1240 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
@@ -7651,9 +16142,9 @@ diff -Nur linux-4.1.6.orig/drivers/misc/hwlat_detector.c linux-4.1.6/drivers/mis
+
+module_init(detector_init);
+module_exit(detector_exit);
-diff -Nur linux-4.1.6.orig/drivers/misc/Kconfig linux-4.1.6/drivers/misc/Kconfig
---- linux-4.1.6.orig/drivers/misc/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/misc/Kconfig 2015-09-08 23:49:06.082091803 +0200
+diff -Nur linux-4.1.10.orig/drivers/misc/Kconfig linux-4.1.10/drivers/misc/Kconfig
+--- linux-4.1.10.orig/drivers/misc/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/misc/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -54,6 +54,7 @@
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
@@ -7724,9 +16215,9 @@ diff -Nur linux-4.1.6.orig/drivers/misc/Kconfig linux-4.1.6/drivers/misc/Kconfig
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
-diff -Nur linux-4.1.6.orig/drivers/misc/Makefile linux-4.1.6/drivers/misc/Makefile
---- linux-4.1.6.orig/drivers/misc/Makefile 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/misc/Makefile 2015-09-08 23:49:06.082091803 +0200
+diff -Nur linux-4.1.10.orig/drivers/misc/Makefile linux-4.1.10/drivers/misc/Makefile
+--- linux-4.1.10.orig/drivers/misc/Makefile 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/misc/Makefile 2015-10-07 18:00:08.000000000 +0200
@@ -38,6 +38,7 @@
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
@@ -7735,9 +16226,9 @@ diff -Nur linux-4.1.6.orig/drivers/misc/Makefile linux-4.1.6/drivers/misc/Makefi
obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-diff -Nur linux-4.1.6.orig/drivers/mmc/host/mmci.c linux-4.1.6/drivers/mmc/host/mmci.c
---- linux-4.1.6.orig/drivers/mmc/host/mmci.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/mmc/host/mmci.c 2015-09-08 23:49:06.082091803 +0200
+diff -Nur linux-4.1.10.orig/drivers/mmc/host/mmci.c linux-4.1.10/drivers/mmc/host/mmci.c
+--- linux-4.1.10.orig/drivers/mmc/host/mmci.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/mmc/host/mmci.c 2015-10-07 18:00:08.000000000 +0200
@@ -1155,15 +1155,12 @@
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
@@ -7763,65 +16254,9 @@ diff -Nur linux-4.1.6.orig/drivers/mmc/host/mmci.c linux-4.1.6/drivers/mmc/host/
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
-diff -Nur linux-4.1.6.orig/drivers/mmc/host/sdhci.c linux-4.1.6/drivers/mmc/host/sdhci.c
---- linux-4.1.6.orig/drivers/mmc/host/sdhci.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/mmc/host/sdhci.c 2015-09-08 23:49:06.082091803 +0200
-@@ -2691,6 +2691,31 @@
- return isr ? IRQ_HANDLED : IRQ_NONE;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static irqreturn_t sdhci_rt_irq(int irq, void *dev_id)
-+{
-+ irqreturn_t ret;
-+
-+ local_bh_disable();
-+ ret = sdhci_irq(irq, dev_id);
-+ local_bh_enable();
-+ if (ret == IRQ_WAKE_THREAD)
-+ ret = sdhci_thread_irq(irq, dev_id);
-+ return ret;
-+}
-+#endif
-+
-+static int sdhci_req_irq(struct sdhci_host *host)
-+{
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ return request_threaded_irq(host->irq, NULL, sdhci_rt_irq,
-+ IRQF_SHARED, mmc_hostname(host->mmc), host);
-+#else
-+ return request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
-+ IRQF_SHARED, mmc_hostname(host->mmc), host);
-+#endif
-+}
-+
- /*****************************************************************************\
- * *
- * Suspend/resume *
-@@ -2758,9 +2783,7 @@
- }
-
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
-- ret = request_threaded_irq(host->irq, sdhci_irq,
-- sdhci_thread_irq, IRQF_SHARED,
-- mmc_hostname(host->mmc), host);
-+ ret = sdhci_req_irq(host);
- if (ret)
- return ret;
- } else {
-@@ -3421,8 +3444,7 @@
-
- sdhci_init(host, 0);
-
-- ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
-- IRQF_SHARED, mmc_hostname(mmc), host);
-+ ret = sdhci_req_irq(host);
- if (ret) {
- pr_err("%s: Failed to request IRQ %d: %d\n",
- mmc_hostname(mmc), host->irq, ret);
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.6/drivers/net/ethernet/3com/3c59x.c
---- linux-4.1.6.orig/drivers/net/ethernet/3com/3c59x.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/3com/3c59x.c 2015-09-08 23:49:06.086091361 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.10/drivers/net/ethernet/3com/3c59x.c
+--- linux-4.1.10.orig/drivers/net/ethernet/3com/3c59x.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/3com/3c59x.c 2015-10-07 18:00:08.000000000 +0200
@@ -842,9 +842,9 @@
{
struct vortex_private *vp = netdev_priv(dev);
@@ -7849,9 +16284,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/3com/3c59x.c linux-4.1.6/drivers
}
}
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-4.1.6/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
---- linux-4.1.6.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-09-08 23:49:06.086091361 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-4.1.10/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+--- linux-4.1.10.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-10-07 18:00:08.000000000 +0200
@@ -2213,11 +2213,7 @@
}
@@ -7865,9 +16300,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-4.1.6/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
---- linux-4.1.6.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-09-08 23:49:06.086091361 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-4.1.10/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+--- linux-4.1.10.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-10-07 18:00:08.000000000 +0200
@@ -1880,8 +1880,7 @@
return NETDEV_TX_OK;
}
@@ -7878,9 +16313,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.1.6/drivers/net/ethernet/chelsio/cxgb/sge.c
---- linux-4.1.6.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-09-08 23:49:06.086091361 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.1.10/drivers/net/ethernet/chelsio/cxgb/sge.c
+--- linux-4.1.10.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-10-07 18:00:08.000000000 +0200
@@ -1664,8 +1664,7 @@
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
@@ -7891,9 +16326,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.1.6/d
reclaim_completed_tx(sge, q);
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.6/drivers/net/ethernet/freescale/gianfar.c
---- linux-4.1.6.orig/drivers/net/ethernet/freescale/gianfar.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/freescale/gianfar.c 2015-09-08 23:49:06.086091361 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.10/drivers/net/ethernet/freescale/gianfar.c
+--- linux-4.1.10.orig/drivers/net/ethernet/freescale/gianfar.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/freescale/gianfar.c 2015-10-07 18:00:08.000000000 +0200
@@ -1540,7 +1540,7 @@
if (netif_running(ndev)) {
@@ -7947,9 +16382,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/freescale/gianfar.c linux-4.1.6/
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/neterion/s2io.c linux-4.1.6/drivers/net/ethernet/neterion/s2io.c
---- linux-4.1.6.orig/drivers/net/ethernet/neterion/s2io.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/neterion/s2io.c 2015-09-08 23:49:06.098090029 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/neterion/s2io.c linux-4.1.10/drivers/net/ethernet/neterion/s2io.c
+--- linux-4.1.10.orig/drivers/net/ethernet/neterion/s2io.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/neterion/s2io.c 2015-10-07 18:00:08.000000000 +0200
@@ -4084,12 +4084,7 @@
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
@@ -7964,9 +16399,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/neterion/s2io.c linux-4.1.6/driv
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-4.1.6/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
---- linux-4.1.6.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-09-08 23:49:06.302067412 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-4.1.10/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+--- linux-4.1.10.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-10-07 18:00:08.000000000 +0200
@@ -2137,10 +2137,8 @@
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
@@ -7980,9 +16415,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/realtek/8139too.c linux-4.1.6/drivers/net/ethernet/realtek/8139too.c
---- linux-4.1.6.orig/drivers/net/ethernet/realtek/8139too.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/realtek/8139too.c 2015-09-08 23:49:06.306066968 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/realtek/8139too.c linux-4.1.10/drivers/net/ethernet/realtek/8139too.c
+--- linux-4.1.10.orig/drivers/net/ethernet/realtek/8139too.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/realtek/8139too.c 2015-10-07 18:00:08.000000000 +0200
@@ -2229,7 +2229,7 @@
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
@@ -7992,9 +16427,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/realtek/8139too.c linux-4.1.6/dr
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
-diff -Nur linux-4.1.6.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.1.6/drivers/net/ethernet/tehuti/tehuti.c
---- linux-4.1.6.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/ethernet/tehuti/tehuti.c 2015-09-08 23:49:06.306066968 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.1.10/drivers/net/ethernet/tehuti/tehuti.c
+--- linux-4.1.10.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/ethernet/tehuti/tehuti.c 2015-10-07 18:00:08.000000000 +0200
@@ -1629,13 +1629,8 @@
unsigned long flags;
@@ -8011,9 +16446,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.1.6/driv
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
-diff -Nur linux-4.1.6.orig/drivers/net/rionet.c linux-4.1.6/drivers/net/rionet.c
---- linux-4.1.6.orig/drivers/net/rionet.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/rionet.c 2015-09-08 23:49:06.306066968 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/rionet.c linux-4.1.10/drivers/net/rionet.c
+--- linux-4.1.10.orig/drivers/net/rionet.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/rionet.c 2015-10-07 18:00:08.000000000 +0200
@@ -174,11 +174,7 @@
unsigned long flags;
int add_num = 1;
@@ -8027,9 +16462,9 @@ diff -Nur linux-4.1.6.orig/drivers/net/rionet.c linux-4.1.6/drivers/net/rionet.c
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
-diff -Nur linux-4.1.6.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.1.6/drivers/net/wireless/orinoco/orinoco_usb.c
---- linux-4.1.6.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/net/wireless/orinoco/orinoco_usb.c 2015-09-08 23:49:06.306066968 +0200
+diff -Nur linux-4.1.10.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.1.10/drivers/net/wireless/orinoco/orinoco_usb.c
+--- linux-4.1.10.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/net/wireless/orinoco/orinoco_usb.c 2015-10-07 18:00:08.000000000 +0200
@@ -697,7 +697,7 @@
while (!ctx->done.done && msecs--)
udelay(1000);
@@ -8039,10 +16474,10 @@ diff -Nur linux-4.1.6.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.1.
ctx->done.done);
}
break;
-diff -Nur linux-4.1.6.orig/drivers/pci/access.c linux-4.1.6/drivers/pci/access.c
---- linux-4.1.6.orig/drivers/pci/access.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/pci/access.c 2015-09-08 23:49:06.310066523 +0200
-@@ -521,7 +521,7 @@
+diff -Nur linux-4.1.10.orig/drivers/pci/access.c linux-4.1.10/drivers/pci/access.c
+--- linux-4.1.10.orig/drivers/pci/access.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/pci/access.c 2015-10-07 18:00:08.000000000 +0200
+@@ -580,7 +580,7 @@
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
@@ -8051,9 +16486,797 @@ diff -Nur linux-4.1.6.orig/drivers/pci/access.c linux-4.1.6/drivers/pci/access.c
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
-diff -Nur linux-4.1.6.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.6/drivers/scsi/fcoe/fcoe.c
---- linux-4.1.6.orig/drivers/scsi/fcoe/fcoe.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/scsi/fcoe/fcoe.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/pci/access.c.orig linux-4.1.10/drivers/pci/access.c.orig
+--- linux-4.1.10.orig/drivers/pci/access.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/drivers/pci/access.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,784 @@
++#include <linux/delay.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/ioport.h>
++#include <linux/wait.h>
++
++#include "pci.h"
++
++/*
++ * This interrupt-safe spinlock protects all accesses to PCI
++ * configuration space.
++ */
++
++DEFINE_RAW_SPINLOCK(pci_lock);
++
++/*
++ * Wrappers for all PCI configuration access functions. They just check
++ * alignment, do locking and call the low-level functions pointed to
++ * by pci_dev->ops.
++ */
++
++#define PCI_byte_BAD 0
++#define PCI_word_BAD (pos & 1)
++#define PCI_dword_BAD (pos & 3)
++
++#define PCI_OP_READ(size,type,len) \
++int pci_bus_read_config_##size \
++ (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
++{ \
++ int res; \
++ unsigned long flags; \
++ u32 data = 0; \
++ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
++ raw_spin_lock_irqsave(&pci_lock, flags); \
++ res = bus->ops->read(bus, devfn, pos, len, &data); \
++ *value = (type)data; \
++ raw_spin_unlock_irqrestore(&pci_lock, flags); \
++ return res; \
++}
++
++#define PCI_OP_WRITE(size,type,len) \
++int pci_bus_write_config_##size \
++ (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
++{ \
++ int res; \
++ unsigned long flags; \
++ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
++ raw_spin_lock_irqsave(&pci_lock, flags); \
++ res = bus->ops->write(bus, devfn, pos, len, value); \
++ raw_spin_unlock_irqrestore(&pci_lock, flags); \
++ return res; \
++}
++
++PCI_OP_READ(byte, u8, 1)
++PCI_OP_READ(word, u16, 2)
++PCI_OP_READ(dword, u32, 4)
++PCI_OP_WRITE(byte, u8, 1)
++PCI_OP_WRITE(word, u16, 2)
++PCI_OP_WRITE(dword, u32, 4)
++
++EXPORT_SYMBOL(pci_bus_read_config_byte);
++EXPORT_SYMBOL(pci_bus_read_config_word);
++EXPORT_SYMBOL(pci_bus_read_config_dword);
++EXPORT_SYMBOL(pci_bus_write_config_byte);
++EXPORT_SYMBOL(pci_bus_write_config_word);
++EXPORT_SYMBOL(pci_bus_write_config_dword);
++
++int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ void __iomem *addr;
++
++ addr = bus->ops->map_bus(bus, devfn, where);
++ if (!addr) {
++ *val = ~0;
++ return PCIBIOS_DEVICE_NOT_FOUND;
++ }
++
++ if (size == 1)
++ *val = readb(addr);
++ else if (size == 2)
++ *val = readw(addr);
++ else
++ *val = readl(addr);
++
++ return PCIBIOS_SUCCESSFUL;
++}
++EXPORT_SYMBOL_GPL(pci_generic_config_read);
++
++int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ void __iomem *addr;
++
++ addr = bus->ops->map_bus(bus, devfn, where);
++ if (!addr)
++ return PCIBIOS_DEVICE_NOT_FOUND;
++
++ if (size == 1)
++ writeb(val, addr);
++ else if (size == 2)
++ writew(val, addr);
++ else
++ writel(val, addr);
++
++ return PCIBIOS_SUCCESSFUL;
++}
++EXPORT_SYMBOL_GPL(pci_generic_config_write);
++
++int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ void __iomem *addr;
++
++ addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
++ if (!addr) {
++ *val = ~0;
++ return PCIBIOS_DEVICE_NOT_FOUND;
++ }
++
++ *val = readl(addr);
++
++ if (size <= 2)
++ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
++
++ return PCIBIOS_SUCCESSFUL;
++}
++EXPORT_SYMBOL_GPL(pci_generic_config_read32);
++
++int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ void __iomem *addr;
++ u32 mask, tmp;
++
++ addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
++ if (!addr)
++ return PCIBIOS_DEVICE_NOT_FOUND;
++
++ if (size == 4) {
++ writel(val, addr);
++ return PCIBIOS_SUCCESSFUL;
++ } else {
++ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
++ }
++
++ tmp = readl(addr) & mask;
++ tmp |= val << ((where & 0x3) * 8);
++ writel(tmp, addr);
++
++ return PCIBIOS_SUCCESSFUL;
++}
++EXPORT_SYMBOL_GPL(pci_generic_config_write32);
++
++/**
++ * pci_bus_set_ops - Set raw operations of pci bus
++ * @bus: pci bus struct
++ * @ops: new raw operations
++ *
++ * Return previous raw operations
++ */
++struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
++{
++ struct pci_ops *old_ops;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&pci_lock, flags);
++ old_ops = bus->ops;
++ bus->ops = ops;
++ raw_spin_unlock_irqrestore(&pci_lock, flags);
++ return old_ops;
++}
++EXPORT_SYMBOL(pci_bus_set_ops);
++
++/**
++ * pci_read_vpd - Read one entry from Vital Product Data
++ * @dev: pci device struct
++ * @pos: offset in vpd space
++ * @count: number of bytes to read
++ * @buf: pointer to where to store result
++ *
++ */
++ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
++{
++ if (!dev->vpd || !dev->vpd->ops)
++ return -ENODEV;
++ return dev->vpd->ops->read(dev, pos, count, buf);
++}
++EXPORT_SYMBOL(pci_read_vpd);
++
++/**
++ * pci_write_vpd - Write entry to Vital Product Data
++ * @dev: pci device struct
++ * @pos: offset in vpd space
++ * @count: number of bytes to write
++ * @buf: buffer containing write data
++ *
++ */
++ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
++{
++ if (!dev->vpd || !dev->vpd->ops)
++ return -ENODEV;
++ return dev->vpd->ops->write(dev, pos, count, buf);
++}
++EXPORT_SYMBOL(pci_write_vpd);
++
++/*
++ * The following routines are to prevent the user from accessing PCI config
++ * space when it's unsafe to do so. Some devices require this during BIST and
++ * we're required to prevent it during D-state transitions.
++ *
++ * We have a bit per device to indicate it's blocked and a global wait queue
++ * for callers to sleep on until devices are unblocked.
++ */
++static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
++
++static noinline void pci_wait_cfg(struct pci_dev *dev)
++{
++ DECLARE_WAITQUEUE(wait, current);
++
++ __add_wait_queue(&pci_cfg_wait, &wait);
++ do {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock_irq(&pci_lock);
++ schedule();
++ raw_spin_lock_irq(&pci_lock);
++ } while (dev->block_cfg_access);
++ __remove_wait_queue(&pci_cfg_wait, &wait);
++}
++
++/* Returns 0 on success, negative values indicate error. */
++#define PCI_USER_READ_CONFIG(size,type) \
++int pci_user_read_config_##size \
++ (struct pci_dev *dev, int pos, type *val) \
++{ \
++ int ret = PCIBIOS_SUCCESSFUL; \
++ u32 data = -1; \
++ if (PCI_##size##_BAD) \
++ return -EINVAL; \
++ raw_spin_lock_irq(&pci_lock); \
++ if (unlikely(dev->block_cfg_access)) \
++ pci_wait_cfg(dev); \
++ ret = dev->bus->ops->read(dev->bus, dev->devfn, \
++ pos, sizeof(type), &data); \
++ raw_spin_unlock_irq(&pci_lock); \
++ *val = (type)data; \
++ return pcibios_err_to_errno(ret); \
++} \
++EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
++
++/* Returns 0 on success, negative values indicate error. */
++#define PCI_USER_WRITE_CONFIG(size,type) \
++int pci_user_write_config_##size \
++ (struct pci_dev *dev, int pos, type val) \
++{ \
++ int ret = PCIBIOS_SUCCESSFUL; \
++ if (PCI_##size##_BAD) \
++ return -EINVAL; \
++ raw_spin_lock_irq(&pci_lock); \
++ if (unlikely(dev->block_cfg_access)) \
++ pci_wait_cfg(dev); \
++ ret = dev->bus->ops->write(dev->bus, dev->devfn, \
++ pos, sizeof(type), val); \
++ raw_spin_unlock_irq(&pci_lock); \
++ return pcibios_err_to_errno(ret); \
++} \
++EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
++
++PCI_USER_READ_CONFIG(byte, u8)
++PCI_USER_READ_CONFIG(word, u16)
++PCI_USER_READ_CONFIG(dword, u32)
++PCI_USER_WRITE_CONFIG(byte, u8)
++PCI_USER_WRITE_CONFIG(word, u16)
++PCI_USER_WRITE_CONFIG(dword, u32)
++
++/* VPD access through PCI 2.2+ VPD capability */
++
++#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)
++
++struct pci_vpd_pci22 {
++ struct pci_vpd base;
++ struct mutex lock;
++ u16 flag;
++ bool busy;
++ u8 cap;
++};
++
++/*
++ * Wait for last operation to complete.
++ * This code has to spin since there is no other notification from the PCI
++ * hardware. Since the VPD is often implemented by serial attachment to an
++ * EEPROM, it may take many milliseconds to complete.
++ *
++ * Returns 0 on success, negative values indicate error.
++ */
++static int pci_vpd_pci22_wait(struct pci_dev *dev)
++{
++ struct pci_vpd_pci22 *vpd =
++ container_of(dev->vpd, struct pci_vpd_pci22, base);
++ unsigned long timeout = jiffies + HZ/20 + 2;
++ u16 status;
++ int ret;
++
++ if (!vpd->busy)
++ return 0;
++
++ for (;;) {
++ ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
++ &status);
++ if (ret < 0)
++ return ret;
++
++ if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
++ vpd->busy = false;
++ return 0;
++ }
++
++ if (time_after(jiffies, timeout)) {
++ dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
++ return -ETIMEDOUT;
++ }
++ if (fatal_signal_pending(current))
++ return -EINTR;
++ if (!cond_resched())
++ udelay(10);
++ }
++}
++
++static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
++ void *arg)
++{
++ struct pci_vpd_pci22 *vpd =
++ container_of(dev->vpd, struct pci_vpd_pci22, base);
++ int ret;
++ loff_t end = pos + count;
++ u8 *buf = arg;
++
++ if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
++ return -EINVAL;
++
++ if (mutex_lock_killable(&vpd->lock))
++ return -EINTR;
++
++ ret = pci_vpd_pci22_wait(dev);
++ if (ret < 0)
++ goto out;
++
++ while (pos < end) {
++ u32 val;
++ unsigned int i, skip;
++
++ ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
++ pos & ~3);
++ if (ret < 0)
++ break;
++ vpd->busy = true;
++ vpd->flag = PCI_VPD_ADDR_F;
++ ret = pci_vpd_pci22_wait(dev);
++ if (ret < 0)
++ break;
++
++ ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
++ if (ret < 0)
++ break;
++
++ skip = pos & 3;
++ for (i = 0; i < sizeof(u32); i++) {
++ if (i >= skip) {
++ *buf++ = val;
++ if (++pos == end)
++ break;
++ }
++ val >>= 8;
++ }
++ }
++out:
++ mutex_unlock(&vpd->lock);
++ return ret ? ret : count;
++}
++
++static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
++ const void *arg)
++{
++ struct pci_vpd_pci22 *vpd =
++ container_of(dev->vpd, struct pci_vpd_pci22, base);
++ const u8 *buf = arg;
++ loff_t end = pos + count;
++ int ret = 0;
++
++ if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
++ return -EINVAL;
++
++ if (mutex_lock_killable(&vpd->lock))
++ return -EINTR;
++
++ ret = pci_vpd_pci22_wait(dev);
++ if (ret < 0)
++ goto out;
++
++ while (pos < end) {
++ u32 val;
++
++ val = *buf++;
++ val |= *buf++ << 8;
++ val |= *buf++ << 16;
++ val |= *buf++ << 24;
++
++ ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
++ if (ret < 0)
++ break;
++ ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
++ pos | PCI_VPD_ADDR_F);
++ if (ret < 0)
++ break;
++
++ vpd->busy = true;
++ vpd->flag = 0;
++ ret = pci_vpd_pci22_wait(dev);
++ if (ret < 0)
++ break;
++
++ pos += sizeof(u32);
++ }
++out:
++ mutex_unlock(&vpd->lock);
++ return ret ? ret : count;
++}
++
++static void pci_vpd_pci22_release(struct pci_dev *dev)
++{
++ kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
++}
++
++static const struct pci_vpd_ops pci_vpd_pci22_ops = {
++ .read = pci_vpd_pci22_read,
++ .write = pci_vpd_pci22_write,
++ .release = pci_vpd_pci22_release,
++};
++
++static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
++ void *arg)
++{
++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++ ssize_t ret;
++
++ if (!tdev)
++ return -ENODEV;
++
++ ret = pci_read_vpd(tdev, pos, count, arg);
++ pci_dev_put(tdev);
++ return ret;
++}
++
++static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
++ const void *arg)
++{
++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++ ssize_t ret;
++
++ if (!tdev)
++ return -ENODEV;
++
++ ret = pci_write_vpd(tdev, pos, count, arg);
++ pci_dev_put(tdev);
++ return ret;
++}
++
++static const struct pci_vpd_ops pci_vpd_f0_ops = {
++ .read = pci_vpd_f0_read,
++ .write = pci_vpd_f0_write,
++ .release = pci_vpd_pci22_release,
++};
++
++static int pci_vpd_f0_dev_check(struct pci_dev *dev)
++{
++ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++ int ret = 0;
++
++ if (!tdev)
++ return -ENODEV;
++ if (!tdev->vpd || !tdev->multifunction ||
++ dev->class != tdev->class || dev->vendor != tdev->vendor ||
++ dev->device != tdev->device)
++ ret = -ENODEV;
++
++ pci_dev_put(tdev);
++ return ret;
++}
++
++int pci_vpd_pci22_init(struct pci_dev *dev)
++{
++ struct pci_vpd_pci22 *vpd;
++ u8 cap;
++
++ cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
++ if (!cap)
++ return -ENODEV;
++ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
++ int ret = pci_vpd_f0_dev_check(dev);
++
++ if (ret)
++ return ret;
++ }
++ vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
++ if (!vpd)
++ return -ENOMEM;
++
++ vpd->base.len = PCI_VPD_PCI22_SIZE;
++ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
++ vpd->base.ops = &pci_vpd_f0_ops;
++ else
++ vpd->base.ops = &pci_vpd_pci22_ops;
++ mutex_init(&vpd->lock);
++ vpd->cap = cap;
++ vpd->busy = false;
++ dev->vpd = &vpd->base;
++ return 0;
++}
++
++/**
++ * pci_cfg_access_lock - Lock PCI config reads/writes
++ * @dev: pci device struct
++ *
++ * When access is locked, any userspace reads or writes to config
++ * space and concurrent lock requests will sleep until access is
++ * allowed via pci_cfg_access_unlocked again.
++ */
++void pci_cfg_access_lock(struct pci_dev *dev)
++{
++ might_sleep();
++
++ raw_spin_lock_irq(&pci_lock);
++ if (dev->block_cfg_access)
++ pci_wait_cfg(dev);
++ dev->block_cfg_access = 1;
++ raw_spin_unlock_irq(&pci_lock);
++}
++EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
++
++/**
++ * pci_cfg_access_trylock - try to lock PCI config reads/writes
++ * @dev: pci device struct
++ *
++ * Same as pci_cfg_access_lock, but will return 0 if access is
++ * already locked, 1 otherwise. This function can be used from
++ * atomic contexts.
++ */
++bool pci_cfg_access_trylock(struct pci_dev *dev)
++{
++ unsigned long flags;
++ bool locked = true;
++
++ raw_spin_lock_irqsave(&pci_lock, flags);
++ if (dev->block_cfg_access)
++ locked = false;
++ else
++ dev->block_cfg_access = 1;
++ raw_spin_unlock_irqrestore(&pci_lock, flags);
++
++ return locked;
++}
++EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
++
++/**
++ * pci_cfg_access_unlock - Unlock PCI config reads/writes
++ * @dev: pci device struct
++ *
++ * This function allows PCI config accesses to resume.
++ */
++void pci_cfg_access_unlock(struct pci_dev *dev)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&pci_lock, flags);
++
++ /* This indicates a problem in the caller, but we don't need
++ * to kill them, unlike a double-block above. */
++ WARN_ON(!dev->block_cfg_access);
++
++ dev->block_cfg_access = 0;
++ wake_up_all(&pci_cfg_wait);
++ raw_spin_unlock_irqrestore(&pci_lock, flags);
++}
++EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
++
++static inline int pcie_cap_version(const struct pci_dev *dev)
++{
++ return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
++}
++
++bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
++{
++ int type = pci_pcie_type(dev);
++
++ return type == PCI_EXP_TYPE_ENDPOINT ||
++ type == PCI_EXP_TYPE_LEG_END ||
++ type == PCI_EXP_TYPE_ROOT_PORT ||
++ type == PCI_EXP_TYPE_UPSTREAM ||
++ type == PCI_EXP_TYPE_DOWNSTREAM ||
++ type == PCI_EXP_TYPE_PCI_BRIDGE ||
++ type == PCI_EXP_TYPE_PCIE_BRIDGE;
++}
++
++static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
++{
++ int type = pci_pcie_type(dev);
++
++ return (type == PCI_EXP_TYPE_ROOT_PORT ||
++ type == PCI_EXP_TYPE_DOWNSTREAM) &&
++ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
++}
++
++static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
++{
++ int type = pci_pcie_type(dev);
++
++ return type == PCI_EXP_TYPE_ROOT_PORT ||
++ type == PCI_EXP_TYPE_RC_EC;
++}
++
++static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
++{
++ if (!pci_is_pcie(dev))
++ return false;
++
++ switch (pos) {
++ case PCI_EXP_FLAGS:
++ return true;
++ case PCI_EXP_DEVCAP:
++ case PCI_EXP_DEVCTL:
++ case PCI_EXP_DEVSTA:
++ return true;
++ case PCI_EXP_LNKCAP:
++ case PCI_EXP_LNKCTL:
++ case PCI_EXP_LNKSTA:
++ return pcie_cap_has_lnkctl(dev);
++ case PCI_EXP_SLTCAP:
++ case PCI_EXP_SLTCTL:
++ case PCI_EXP_SLTSTA:
++ return pcie_cap_has_sltctl(dev);
++ case PCI_EXP_RTCTL:
++ case PCI_EXP_RTCAP:
++ case PCI_EXP_RTSTA:
++ return pcie_cap_has_rtctl(dev);
++ case PCI_EXP_DEVCAP2:
++ case PCI_EXP_DEVCTL2:
++ case PCI_EXP_LNKCAP2:
++ case PCI_EXP_LNKCTL2:
++ case PCI_EXP_LNKSTA2:
++ return pcie_cap_version(dev) > 1;
++ default:
++ return false;
++ }
++}
++
++/*
++ * Note that these accessor functions are only for the "PCI Express
++ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
++ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
++ */
++int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
++{
++ int ret;
++
++ *val = 0;
++ if (pos & 1)
++ return -EINVAL;
++
++ if (pcie_capability_reg_implemented(dev, pos)) {
++ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
++ /*
++ * Reset *val to 0 if pci_read_config_word() fails, it may
++ * have been written as 0xFFFF if hardware error happens
++ * during pci_read_config_word().
++ */
++ if (ret)
++ *val = 0;
++ return ret;
++ }
++
++ /*
++ * For Functions that do not implement the Slot Capabilities,
++ * Slot Status, and Slot Control registers, these spaces must
++ * be hardwired to 0b, with the exception of the Presence Detect
++ * State bit in the Slot Status register of Downstream Ports,
++ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
++ */
++ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
++ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
++ *val = PCI_EXP_SLTSTA_PDS;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(pcie_capability_read_word);
++
++int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
++{
++ int ret;
++
++ *val = 0;
++ if (pos & 3)
++ return -EINVAL;
++
++ if (pcie_capability_reg_implemented(dev, pos)) {
++ ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
++ /*
++ * Reset *val to 0 if pci_read_config_dword() fails, it may
++ * have been written as 0xFFFFFFFF if hardware error happens
++ * during pci_read_config_dword().
++ */
++ if (ret)
++ *val = 0;
++ return ret;
++ }
++
++ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
++ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
++ *val = PCI_EXP_SLTSTA_PDS;
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(pcie_capability_read_dword);
++
++int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
++{
++ if (pos & 1)
++ return -EINVAL;
++
++ if (!pcie_capability_reg_implemented(dev, pos))
++ return 0;
++
++ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
++}
++EXPORT_SYMBOL(pcie_capability_write_word);
++
++int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
++{
++ if (pos & 3)
++ return -EINVAL;
++
++ if (!pcie_capability_reg_implemented(dev, pos))
++ return 0;
++
++ return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
++}
++EXPORT_SYMBOL(pcie_capability_write_dword);
++
++int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
++ u16 clear, u16 set)
++{
++ int ret;
++ u16 val;
++
++ ret = pcie_capability_read_word(dev, pos, &val);
++ if (!ret) {
++ val &= ~clear;
++ val |= set;
++ ret = pcie_capability_write_word(dev, pos, val);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
++
++int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
++ u32 clear, u32 set)
++{
++ int ret;
++ u32 val;
++
++ ret = pcie_capability_read_dword(dev, pos, &val);
++ if (!ret) {
++ val &= ~clear;
++ val |= set;
++ ret = pcie_capability_write_dword(dev, pos, val);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
+diff -Nur linux-4.1.10.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.10/drivers/scsi/fcoe/fcoe.c
+--- linux-4.1.10.orig/drivers/scsi/fcoe/fcoe.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/scsi/fcoe/fcoe.c 2015-10-07 18:00:08.000000000 +0200
@@ -1287,7 +1287,7 @@
struct sk_buff *skb;
#ifdef CONFIG_SMP
@@ -8117,9 +17340,9 @@ diff -Nur linux-4.1.6.orig/drivers/scsi/fcoe/fcoe.c linux-4.1.6/drivers/scsi/fco
kfree_skb(skb);
}
-diff -Nur linux-4.1.6.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.6/drivers/scsi/fcoe/fcoe_ctlr.c
---- linux-4.1.6.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/scsi/fcoe/fcoe_ctlr.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.10/drivers/scsi/fcoe/fcoe_ctlr.c
+--- linux-4.1.10.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/scsi/fcoe/fcoe_ctlr.c 2015-10-07 18:00:08.000000000 +0200
@@ -831,7 +831,7 @@
INIT_LIST_HEAD(&del_list);
@@ -8138,10 +17361,10 @@ diff -Nur linux-4.1.6.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.1.6/drivers/scs
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
-diff -Nur linux-4.1.6.orig/drivers/scsi/libfc/fc_exch.c linux-4.1.6/drivers/scsi/libfc/fc_exch.c
---- linux-4.1.6.orig/drivers/scsi/libfc/fc_exch.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/scsi/libfc/fc_exch.c 2015-09-08 23:49:06.314066079 +0200
-@@ -816,10 +816,10 @@
+diff -Nur linux-4.1.10.orig/drivers/scsi/libfc/fc_exch.c linux-4.1.10/drivers/scsi/libfc/fc_exch.c
+--- linux-4.1.10.orig/drivers/scsi/libfc/fc_exch.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/scsi/libfc/fc_exch.c 2015-10-07 18:00:08.000000000 +0200
+@@ -814,10 +814,10 @@
}
memset(ep, 0, sizeof(*ep));
@@ -8154,9 +17377,9 @@ diff -Nur linux-4.1.6.orig/drivers/scsi/libfc/fc_exch.c linux-4.1.6/drivers/scsi
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
-diff -Nur linux-4.1.6.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.6/drivers/scsi/libsas/sas_ata.c
---- linux-4.1.6.orig/drivers/scsi/libsas/sas_ata.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/scsi/libsas/sas_ata.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.10/drivers/scsi/libsas/sas_ata.c
+--- linux-4.1.10.orig/drivers/scsi/libsas/sas_ata.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/scsi/libsas/sas_ata.c 2015-10-07 18:00:08.000000000 +0200
@@ -190,7 +190,7 @@
/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
@@ -8175,9 +17398,9 @@ diff -Nur linux-4.1.6.orig/drivers/scsi/libsas/sas_ata.c linux-4.1.6/drivers/scs
return ret;
}
-diff -Nur linux-4.1.6.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.1.6/drivers/scsi/qla2xxx/qla_inline.h
---- linux-4.1.6.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/scsi/qla2xxx/qla_inline.h 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.1.10/drivers/scsi/qla2xxx/qla_inline.h
+--- linux-4.1.10.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/scsi/qla2xxx/qla_inline.h 2015-10-07 18:00:08.000000000 +0200
@@ -59,12 +59,12 @@
{
unsigned long flags;
@@ -8193,9 +17416,9 @@ diff -Nur linux-4.1.6.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.1.6/drivers
}
static inline uint8_t *
-diff -Nur linux-4.1.6.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.6/drivers/thermal/x86_pkg_temp_thermal.c
---- linux-4.1.6.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/thermal/x86_pkg_temp_thermal.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.10/drivers/thermal/x86_pkg_temp_thermal.c
+--- linux-4.1.10.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/thermal/x86_pkg_temp_thermal.c 2015-10-07 18:00:08.000000000 +0200
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <linux/thermal.h>
@@ -8298,9 +17521,9 @@ diff -Nur linux-4.1.6.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.1.6/dr
for_each_online_cpu(i)
cancel_delayed_work_sync(
&per_cpu(pkg_temp_thermal_threshold_work, i));
-diff -Nur linux-4.1.6.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.6/drivers/tty/serial/8250/8250_core.c
---- linux-4.1.6.orig/drivers/tty/serial/8250/8250_core.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/tty/serial/8250/8250_core.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.10/drivers/tty/serial/8250/8250_core.c
+--- linux-4.1.10.orig/drivers/tty/serial/8250/8250_core.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/tty/serial/8250/8250_core.c 2015-10-07 18:00:08.000000000 +0200
@@ -36,6 +36,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
@@ -8336,9 +17559,9 @@ diff -Nur linux-4.1.6.orig/drivers/tty/serial/8250/8250_core.c linux-4.1.6/drive
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
-diff -Nur linux-4.1.6.orig/drivers/tty/serial/amba-pl011.c linux-4.1.6/drivers/tty/serial/amba-pl011.c
---- linux-4.1.6.orig/drivers/tty/serial/amba-pl011.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/tty/serial/amba-pl011.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/tty/serial/amba-pl011.c linux-4.1.10/drivers/tty/serial/amba-pl011.c
+--- linux-4.1.10.orig/drivers/tty/serial/amba-pl011.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/tty/serial/amba-pl011.c 2015-10-07 18:00:08.000000000 +0200
@@ -2000,13 +2000,19 @@
clk_enable(uap->clk);
@@ -8372,9 +17595,9 @@ diff -Nur linux-4.1.6.orig/drivers/tty/serial/amba-pl011.c linux-4.1.6/drivers/t
clk_disable(uap->clk);
}
-diff -Nur linux-4.1.6.orig/drivers/tty/serial/omap-serial.c linux-4.1.6/drivers/tty/serial/omap-serial.c
---- linux-4.1.6.orig/drivers/tty/serial/omap-serial.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/tty/serial/omap-serial.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/tty/serial/omap-serial.c linux-4.1.10/drivers/tty/serial/omap-serial.c
+--- linux-4.1.10.orig/drivers/tty/serial/omap-serial.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/tty/serial/omap-serial.c 2015-10-07 18:00:08.000000000 +0200
@@ -1282,13 +1282,10 @@
pm_runtime_get_sync(up->dev);
@@ -8402,9 +17625,9 @@ diff -Nur linux-4.1.6.orig/drivers/tty/serial/omap-serial.c linux-4.1.6/drivers/
}
static int __init
-diff -Nur linux-4.1.6.orig/drivers/usb/core/hcd.c linux-4.1.6/drivers/usb/core/hcd.c
---- linux-4.1.6.orig/drivers/usb/core/hcd.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/usb/core/hcd.c 2015-09-08 23:49:06.314066079 +0200
+diff -Nur linux-4.1.10.orig/drivers/usb/core/hcd.c linux-4.1.10/drivers/usb/core/hcd.c
+--- linux-4.1.10.orig/drivers/usb/core/hcd.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/usb/core/hcd.c 2015-10-07 18:00:08.000000000 +0200
@@ -1684,9 +1684,9 @@
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
@@ -8417,9 +17640,9 @@ diff -Nur linux-4.1.6.orig/drivers/usb/core/hcd.c linux-4.1.6/drivers/usb/core/h
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
-diff -Nur linux-4.1.6.orig/drivers/usb/gadget/function/f_fs.c linux-4.1.6/drivers/usb/gadget/function/f_fs.c
---- linux-4.1.6.orig/drivers/usb/gadget/function/f_fs.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/usb/gadget/function/f_fs.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/drivers/usb/gadget/function/f_fs.c linux-4.1.10/drivers/usb/gadget/function/f_fs.c
+--- linux-4.1.10.orig/drivers/usb/gadget/function/f_fs.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/usb/gadget/function/f_fs.c 2015-10-07 18:00:08.000000000 +0200
@@ -1405,7 +1405,7 @@
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
@@ -8429,9 +17652,9 @@ diff -Nur linux-4.1.6.orig/drivers/usb/gadget/function/f_fs.c linux-4.1.6/driver
kfree(ffs->dev_name);
kfree(ffs);
}
-diff -Nur linux-4.1.6.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.6/drivers/usb/gadget/legacy/inode.c
---- linux-4.1.6.orig/drivers/usb/gadget/legacy/inode.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/drivers/usb/gadget/legacy/inode.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.10/drivers/usb/gadget/legacy/inode.c
+--- linux-4.1.10.orig/drivers/usb/gadget/legacy/inode.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/drivers/usb/gadget/legacy/inode.c 2015-10-07 18:00:08.000000000 +0200
@@ -345,7 +345,7 @@
spin_unlock_irq (&epdata->dev->lock);
@@ -8450,9 +17673,9 @@ diff -Nur linux-4.1.6.orig/drivers/usb/gadget/legacy/inode.c linux-4.1.6/drivers
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
-diff -Nur linux-4.1.6.orig/fs/aio.c linux-4.1.6/fs/aio.c
---- linux-4.1.6.orig/fs/aio.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/aio.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/aio.c linux-4.1.10/fs/aio.c
+--- linux-4.1.10.orig/fs/aio.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/aio.c 2015-10-07 18:00:08.000000000 +0200
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
@@ -8528,9 +17751,9 @@ diff -Nur linux-4.1.6.orig/fs/aio.c linux-4.1.6/fs/aio.c
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;
-diff -Nur linux-4.1.6.orig/fs/autofs4/autofs_i.h linux-4.1.6/fs/autofs4/autofs_i.h
---- linux-4.1.6.orig/fs/autofs4/autofs_i.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/autofs4/autofs_i.h 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/autofs4/autofs_i.h linux-4.1.10/fs/autofs4/autofs_i.h
+--- linux-4.1.10.orig/fs/autofs4/autofs_i.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/autofs4/autofs_i.h 2015-10-07 18:00:08.000000000 +0200
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
@@ -8539,9 +17762,9 @@ diff -Nur linux-4.1.6.orig/fs/autofs4/autofs_i.h linux-4.1.6/fs/autofs4/autofs_i
#include <asm/current.h>
#include <asm/uaccess.h>
-diff -Nur linux-4.1.6.orig/fs/autofs4/expire.c linux-4.1.6/fs/autofs4/expire.c
---- linux-4.1.6.orig/fs/autofs4/expire.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/autofs4/expire.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/autofs4/expire.c linux-4.1.10/fs/autofs4/expire.c
+--- linux-4.1.10.orig/fs/autofs4/expire.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/autofs4/expire.c 2015-10-07 18:00:08.000000000 +0200
@@ -150,7 +150,7 @@
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
@@ -8551,9 +17774,9 @@ diff -Nur linux-4.1.6.orig/fs/autofs4/expire.c linux-4.1.6/fs/autofs4/expire.c
goto relock;
}
spin_unlock(&p->d_lock);
-diff -Nur linux-4.1.6.orig/fs/buffer.c linux-4.1.6/fs/buffer.c
---- linux-4.1.6.orig/fs/buffer.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/buffer.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/buffer.c linux-4.1.10/fs/buffer.c
+--- linux-4.1.10.orig/fs/buffer.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/buffer.c 2015-10-07 18:00:08.000000000 +0200
@@ -301,8 +301,7 @@
* decide that the page is now completely done.
*/
@@ -8621,9 +17844,9 @@ diff -Nur linux-4.1.6.orig/fs/buffer.c linux-4.1.6/fs/buffer.c
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
-diff -Nur linux-4.1.6.orig/fs/dcache.c linux-4.1.6/fs/dcache.c
---- linux-4.1.6.orig/fs/dcache.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/dcache.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/dcache.c linux-4.1.10/fs/dcache.c
+--- linux-4.1.10.orig/fs/dcache.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/dcache.c 2015-10-07 18:00:08.000000000 +0200
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
@@ -8650,9 +17873,9 @@ diff -Nur linux-4.1.6.orig/fs/dcache.c linux-4.1.6/fs/dcache.c
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-diff -Nur linux-4.1.6.orig/fs/eventpoll.c linux-4.1.6/fs/eventpoll.c
---- linux-4.1.6.orig/fs/eventpoll.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/eventpoll.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/eventpoll.c linux-4.1.10/fs/eventpoll.c
+--- linux-4.1.10.orig/fs/eventpoll.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/eventpoll.c 2015-10-07 18:00:08.000000000 +0200
@@ -505,12 +505,12 @@
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
@@ -8668,9 +17891,9 @@ diff -Nur linux-4.1.6.orig/fs/eventpoll.c linux-4.1.6/fs/eventpoll.c
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
-diff -Nur linux-4.1.6.orig/fs/exec.c linux-4.1.6/fs/exec.c
---- linux-4.1.6.orig/fs/exec.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/exec.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/exec.c linux-4.1.10/fs/exec.c
+--- linux-4.1.10.orig/fs/exec.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/exec.c 2015-10-07 18:00:08.000000000 +0200
@@ -859,12 +859,14 @@
}
}
@@ -8686,9 +17909,9 @@ diff -Nur linux-4.1.6.orig/fs/exec.c linux-4.1.6/fs/exec.c
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
-diff -Nur linux-4.1.6.orig/fs/jbd/checkpoint.c linux-4.1.6/fs/jbd/checkpoint.c
---- linux-4.1.6.orig/fs/jbd/checkpoint.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/jbd/checkpoint.c 2015-09-08 23:49:06.318065636 +0200
+diff -Nur linux-4.1.10.orig/fs/jbd/checkpoint.c linux-4.1.10/fs/jbd/checkpoint.c
+--- linux-4.1.10.orig/fs/jbd/checkpoint.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/jbd/checkpoint.c 2015-10-07 18:00:08.000000000 +0200
@@ -129,6 +129,8 @@
if (journal->j_flags & JFS_ABORT)
return;
@@ -8698,9 +17921,9 @@ diff -Nur linux-4.1.6.orig/fs/jbd/checkpoint.c linux-4.1.6/fs/jbd/checkpoint.c
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-4.1.6.orig/fs/jbd2/checkpoint.c linux-4.1.6/fs/jbd2/checkpoint.c
---- linux-4.1.6.orig/fs/jbd2/checkpoint.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/jbd2/checkpoint.c 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/fs/jbd2/checkpoint.c linux-4.1.10/fs/jbd2/checkpoint.c
+--- linux-4.1.10.orig/fs/jbd2/checkpoint.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/jbd2/checkpoint.c 2015-10-07 18:00:08.000000000 +0200
@@ -116,6 +116,8 @@
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
@@ -8710,9 +17933,9 @@ diff -Nur linux-4.1.6.orig/fs/jbd2/checkpoint.c linux-4.1.6/fs/jbd2/checkpoint.c
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-4.1.6.orig/fs/namespace.c linux-4.1.6/fs/namespace.c
---- linux-4.1.6.orig/fs/namespace.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/namespace.c 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/fs/namespace.c linux-4.1.10/fs/namespace.c
+--- linux-4.1.10.orig/fs/namespace.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/namespace.c 2015-10-07 18:00:08.000000000 +0200
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
@@ -8735,9 +17958,9 @@ diff -Nur linux-4.1.6.orig/fs/namespace.c linux-4.1.6/fs/namespace.c
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
-diff -Nur linux-4.1.6.orig/fs/ntfs/aops.c linux-4.1.6/fs/ntfs/aops.c
---- linux-4.1.6.orig/fs/ntfs/aops.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/ntfs/aops.c 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/fs/ntfs/aops.c linux-4.1.10/fs/ntfs/aops.c
+--- linux-4.1.10.orig/fs/ntfs/aops.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/ntfs/aops.c 2015-10-07 18:00:08.000000000 +0200
@@ -107,8 +107,7 @@
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
@@ -8785,9 +18008,9 @@ diff -Nur linux-4.1.6.orig/fs/ntfs/aops.c linux-4.1.6/fs/ntfs/aops.c
}
/**
-diff -Nur linux-4.1.6.orig/fs/timerfd.c linux-4.1.6/fs/timerfd.c
---- linux-4.1.6.orig/fs/timerfd.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/fs/timerfd.c 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/fs/timerfd.c linux-4.1.10/fs/timerfd.c
+--- linux-4.1.10.orig/fs/timerfd.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/timerfd.c 2015-10-07 18:00:08.000000000 +0200
@@ -450,7 +450,10 @@
break;
}
@@ -8800,9 +18023,234 @@ diff -Nur linux-4.1.6.orig/fs/timerfd.c linux-4.1.6/fs/timerfd.c
}
/*
-diff -Nur linux-4.1.6.orig/include/acpi/platform/aclinux.h linux-4.1.6/include/acpi/platform/aclinux.h
---- linux-4.1.6.orig/include/acpi/platform/aclinux.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/acpi/platform/aclinux.h 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/fs/xfs/xfs_inode.c linux-4.1.10/fs/xfs/xfs_inode.c
+--- linux-4.1.10.orig/fs/xfs/xfs_inode.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/xfs/xfs_inode.c 2015-10-07 18:00:08.000000000 +0200
+@@ -164,7 +164,7 @@
+ (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
+ ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+ (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
++ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+
+ if (lock_flags & XFS_IOLOCK_EXCL)
+ mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
+@@ -212,7 +212,7 @@
+ (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
+ ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+ (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
++ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+
+ if (lock_flags & XFS_IOLOCK_EXCL) {
+ if (!mrtryupdate(&ip->i_iolock))
+@@ -281,7 +281,7 @@
+ (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
+ ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
+ (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
+- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
++ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
+ ASSERT(lock_flags != 0);
+
+ if (lock_flags & XFS_IOLOCK_EXCL)
+@@ -364,30 +364,38 @@
+
+ /*
+ * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
+- * value. This shouldn't be called for page fault locking, but we also need to
+- * ensure we don't overrun the number of lockdep subclasses for the iolock or
+- * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
++ * value. This can be called for any type of inode lock combination, including
++ * parent locking. Care must be taken to ensure we don't overrun the subclass
++ * storage fields in the class mask we build.
+ */
+ static inline int
+ xfs_lock_inumorder(int lock_mode, int subclass)
+ {
++ int class = 0;
++
++ ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
++ XFS_ILOCK_RTSUM)));
++
+ if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
+- ASSERT(subclass + XFS_LOCK_INUMORDER <
+- (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
+- lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
++ ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
++ ASSERT(subclass + XFS_IOLOCK_PARENT_VAL <
++ MAX_LOCKDEP_SUBCLASSES);
++ class += subclass << XFS_IOLOCK_SHIFT;
++ if (lock_mode & XFS_IOLOCK_PARENT)
++ class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
+ }
+
+ if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
+- ASSERT(subclass + XFS_LOCK_INUMORDER <
+- (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
+- lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
+- XFS_MMAPLOCK_SHIFT;
++ ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
++ class += subclass << XFS_MMAPLOCK_SHIFT;
+ }
+
+- if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
+- lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
++ if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
++ ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
++ class += subclass << XFS_ILOCK_SHIFT;
++ }
+
+- return lock_mode;
++ return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
+ }
+
+ /*
+@@ -399,6 +407,11 @@
+ * transaction (such as truncate). This can result in deadlock since the long
+ * running trans might need to wait for the inode we just locked in order to
+ * push the tail and free space in the log.
++ *
++ * xfs_lock_inodes() can only be used to lock one type of lock at a time -
++ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
++ * lock more than one at a time, lockdep will report false positives saying we
++ * have violated locking orders.
+ */
+ void
+ xfs_lock_inodes(
+@@ -409,8 +422,29 @@
+ int attempts = 0, i, j, try_lock;
+ xfs_log_item_t *lp;
+
+- /* currently supports between 2 and 5 inodes */
++ /*
++ * Currently supports between 2 and 5 inodes with exclusive locking. We
++ * support an arbitrary depth of locking here, but absolute limits on
++ * inodes depend on the the type of locking and the limits placed by
++ * lockdep annotations in xfs_lock_inumorder. These are all checked by
++ * the asserts.
++ */
+ ASSERT(ips && inodes >= 2 && inodes <= 5);
++ ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
++ XFS_ILOCK_EXCL));
++ ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
++ XFS_ILOCK_SHARED)));
++ ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
++ inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
++ ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
++ inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
++ ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
++ inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
++
++ if (lock_mode & XFS_IOLOCK_EXCL) {
++ ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
++ } else if (lock_mode & XFS_MMAPLOCK_EXCL)
++ ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
+
+ try_lock = 0;
+ i = 0;
+diff -Nur linux-4.1.10.orig/fs/xfs/xfs_inode.h linux-4.1.10/fs/xfs/xfs_inode.h
+--- linux-4.1.10.orig/fs/xfs/xfs_inode.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/fs/xfs/xfs_inode.h 2015-10-07 18:00:08.000000000 +0200
+@@ -284,9 +284,9 @@
+ * Flags for lockdep annotations.
+ *
+ * XFS_LOCK_PARENT - for directory operations that require locking a
+- * parent directory inode and a child entry inode. The parent gets locked
+- * with this flag so it gets a lockdep subclass of 1 and the child entry
+- * lock will have a lockdep subclass of 0.
++ * parent directory inode and a child entry inode. IOLOCK requires nesting,
++ * MMAPLOCK does not support this class, ILOCK requires a single subclass
++ * to differentiate parent from child.
+ *
+ * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
+ * inodes do not participate in the normal lock order, and thus have their
+@@ -295,30 +295,63 @@
+ * XFS_LOCK_INUMORDER - for locking several inodes at the some time
+ * with xfs_lock_inodes(). This flag is used as the starting subclass
+ * and each subsequent lock acquired will increment the subclass by one.
+- * So the first lock acquired will have a lockdep subclass of 4, the
+- * second lock will have a lockdep subclass of 5, and so on. It is
+- * the responsibility of the class builder to shift this to the correct
+- * portion of the lock_mode lockdep mask.
++ * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
++ * limited to the subclasses we can represent via nesting. We need at least
++ * 5 inodes nest depth for the ILOCK through rename, and we also have to support
++ * XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
++ * and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
++ * 8 subclasses supported by lockdep.
++ *
++ * This also means we have to number the sub-classes in the lowest bits of
++ * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
++ * mask and we can't use bit-masking to build the subclasses. What a mess.
++ *
++ * Bit layout:
++ *
++ * Bit Lock Region
++ * 16-19 XFS_IOLOCK_SHIFT dependencies
++ * 20-23 XFS_MMAPLOCK_SHIFT dependencies
++ * 24-31 XFS_ILOCK_SHIFT dependencies
++ *
++ * IOLOCK values
++ *
++ * 0-3 subclass value
++ * 4-7 PARENT subclass values
++ *
++ * MMAPLOCK values
++ *
++ * 0-3 subclass value
++ * 4-7 unused
++ *
++ * ILOCK values
++ * 0-4 subclass values
++ * 5 PARENT subclass (not nestable)
++ * 6 RTBITMAP subclass (not nestable)
++ * 7 RTSUM subclass (not nestable)
++ *
+ */
+-#define XFS_LOCK_PARENT 1
+-#define XFS_LOCK_RTBITMAP 2
+-#define XFS_LOCK_RTSUM 3
+-#define XFS_LOCK_INUMORDER 4
+-
+-#define XFS_IOLOCK_SHIFT 16
+-#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
++#define XFS_IOLOCK_SHIFT 16
++#define XFS_IOLOCK_PARENT_VAL 4
++#define XFS_IOLOCK_MAX_SUBCLASS (XFS_IOLOCK_PARENT_VAL - 1)
++#define XFS_IOLOCK_DEP_MASK 0x000f0000
++#define XFS_IOLOCK_PARENT (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
+
+-#define XFS_MMAPLOCK_SHIFT 20
++#define XFS_MMAPLOCK_SHIFT 20
++#define XFS_MMAPLOCK_NUMORDER 0
++#define XFS_MMAPLOCK_MAX_SUBCLASS 3
++#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
+
+-#define XFS_ILOCK_SHIFT 24
+-#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
+-#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
+-#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
++#define XFS_ILOCK_SHIFT 24
++#define XFS_ILOCK_PARENT_VAL 5
++#define XFS_ILOCK_MAX_SUBCLASS (XFS_ILOCK_PARENT_VAL - 1)
++#define XFS_ILOCK_RTBITMAP_VAL 6
++#define XFS_ILOCK_RTSUM_VAL 7
++#define XFS_ILOCK_DEP_MASK 0xff000000
++#define XFS_ILOCK_PARENT (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
++#define XFS_ILOCK_RTBITMAP (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
++#define XFS_ILOCK_RTSUM (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
+
+-#define XFS_IOLOCK_DEP_MASK 0x000f0000
+-#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
+-#define XFS_ILOCK_DEP_MASK 0xff000000
+-#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \
++#define XFS_LOCK_SUBCLASS_MASK (XFS_IOLOCK_DEP_MASK | \
+ XFS_MMAPLOCK_DEP_MASK | \
+ XFS_ILOCK_DEP_MASK)
+
+diff -Nur linux-4.1.10.orig/include/acpi/platform/aclinux.h linux-4.1.10/include/acpi/platform/aclinux.h
+--- linux-4.1.10.orig/include/acpi/platform/aclinux.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/acpi/platform/aclinux.h 2015-10-07 18:00:08.000000000 +0200
@@ -123,6 +123,7 @@
#define acpi_cache_t struct kmem_cache
@@ -8832,9 +18280,9 @@ diff -Nur linux-4.1.6.orig/include/acpi/platform/aclinux.h linux-4.1.6/include/a
/*
* OSL interfaces used by debugger/disassembler
*/
-diff -Nur linux-4.1.6.orig/include/asm-generic/bug.h linux-4.1.6/include/asm-generic/bug.h
---- linux-4.1.6.orig/include/asm-generic/bug.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/asm-generic/bug.h 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/include/asm-generic/bug.h linux-4.1.10/include/asm-generic/bug.h
+--- linux-4.1.10.orig/include/asm-generic/bug.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/asm-generic/bug.h 2015-10-07 18:00:08.000000000 +0200
@@ -206,6 +206,20 @@
# define WARN_ON_SMP(x) ({0;})
#endif
@@ -8856,9 +18304,9 @@ diff -Nur linux-4.1.6.orig/include/asm-generic/bug.h linux-4.1.6/include/asm-gen
#endif /* __ASSEMBLY__ */
#endif
-diff -Nur linux-4.1.6.orig/include/asm-generic/futex.h linux-4.1.6/include/asm-generic/futex.h
---- linux-4.1.6.orig/include/asm-generic/futex.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/asm-generic/futex.h 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/include/asm-generic/futex.h linux-4.1.10/include/asm-generic/futex.h
+--- linux-4.1.10.orig/include/asm-generic/futex.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/asm-generic/futex.h 2015-10-07 18:00:08.000000000 +0200
@@ -8,8 +8,7 @@
#ifndef CONFIG_SMP
/*
@@ -8901,9 +18349,9 @@ diff -Nur linux-4.1.6.orig/include/asm-generic/futex.h linux-4.1.6/include/asm-g
return 0;
}
-diff -Nur linux-4.1.6.orig/include/linux/blkdev.h linux-4.1.6/include/linux/blkdev.h
---- linux-4.1.6.orig/include/linux/blkdev.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/blkdev.h 2015-09-08 23:49:06.530042129 +0200
+diff -Nur linux-4.1.10.orig/include/linux/blkdev.h linux-4.1.10/include/linux/blkdev.h
+--- linux-4.1.10.orig/include/linux/blkdev.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/blkdev.h 2015-10-07 18:00:08.000000000 +0200
@@ -101,6 +101,7 @@
struct list_head queuelist;
union {
@@ -8921,9 +18369,9 @@ diff -Nur linux-4.1.6.orig/include/linux/blkdev.h linux-4.1.6/include/linux/blkd
struct percpu_ref mq_usage_counter;
struct list_head all_q_node;
-diff -Nur linux-4.1.6.orig/include/linux/blk-mq.h linux-4.1.6/include/linux/blk-mq.h
---- linux-4.1.6.orig/include/linux/blk-mq.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/blk-mq.h 2015-09-08 23:49:06.322065194 +0200
+diff -Nur linux-4.1.10.orig/include/linux/blk-mq.h linux-4.1.10/include/linux/blk-mq.h
+--- linux-4.1.10.orig/include/linux/blk-mq.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/blk-mq.h 2015-10-07 18:00:08.000000000 +0200
@@ -202,6 +202,7 @@
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
@@ -8932,9 +18380,9 @@ diff -Nur linux-4.1.6.orig/include/linux/blk-mq.h linux-4.1.6/include/linux/blk-
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
-diff -Nur linux-4.1.6.orig/include/linux/bottom_half.h linux-4.1.6/include/linux/bottom_half.h
---- linux-4.1.6.orig/include/linux/bottom_half.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/bottom_half.h 2015-09-08 23:49:06.530042129 +0200
+diff -Nur linux-4.1.10.orig/include/linux/bottom_half.h linux-4.1.10/include/linux/bottom_half.h
+--- linux-4.1.10.orig/include/linux/bottom_half.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/bottom_half.h 2015-10-07 18:00:08.000000000 +0200
@@ -4,6 +4,17 @@
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
@@ -8960,9 +18408,9 @@ diff -Nur linux-4.1.6.orig/include/linux/bottom_half.h linux-4.1.6/include/linux
+#endif
#endif /* _LINUX_BH_H */
-diff -Nur linux-4.1.6.orig/include/linux/buffer_head.h linux-4.1.6/include/linux/buffer_head.h
---- linux-4.1.6.orig/include/linux/buffer_head.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/buffer_head.h 2015-09-08 23:49:06.530042129 +0200
+diff -Nur linux-4.1.10.orig/include/linux/buffer_head.h linux-4.1.10/include/linux/buffer_head.h
+--- linux-4.1.10.orig/include/linux/buffer_head.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/buffer_head.h 2015-10-07 18:00:08.000000000 +0200
@@ -75,8 +75,52 @@
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
@@ -9016,9 +18464,9 @@ diff -Nur linux-4.1.6.orig/include/linux/buffer_head.h linux-4.1.6/include/linux
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
-diff -Nur linux-4.1.6.orig/include/linux/cgroup.h linux-4.1.6/include/linux/cgroup.h
---- linux-4.1.6.orig/include/linux/cgroup.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/cgroup.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/cgroup.h linux-4.1.10/include/linux/cgroup.h
+--- linux-4.1.10.orig/include/linux/cgroup.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/cgroup.h 2015-10-07 18:00:08.000000000 +0200
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
@@ -9035,9 +18483,9 @@ diff -Nur linux-4.1.6.orig/include/linux/cgroup.h linux-4.1.6/include/linux/cgro
};
/* bits in struct cgroup_subsys_state flags field */
-diff -Nur linux-4.1.6.orig/include/linux/completion.h linux-4.1.6/include/linux/completion.h
---- linux-4.1.6.orig/include/linux/completion.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/completion.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/completion.h linux-4.1.10/include/linux/completion.h
+--- linux-4.1.10.orig/include/linux/completion.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/completion.h 2015-10-07 18:00:08.000000000 +0200
@@ -7,8 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
@@ -9071,9 +18519,9 @@ diff -Nur linux-4.1.6.orig/include/linux/completion.h linux-4.1.6/include/linux/
}
/**
-diff -Nur linux-4.1.6.orig/include/linux/cpu.h linux-4.1.6/include/linux/cpu.h
---- linux-4.1.6.orig/include/linux/cpu.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/cpu.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/cpu.h linux-4.1.10/include/linux/cpu.h
+--- linux-4.1.10.orig/include/linux/cpu.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/cpu.h 2015-10-07 18:00:08.000000000 +0200
@@ -231,6 +231,8 @@
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
@@ -9092,9 +18540,9 @@ diff -Nur linux-4.1.6.orig/include/linux/cpu.h linux-4.1.6/include/linux/cpu.h
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
-diff -Nur linux-4.1.6.orig/include/linux/delay.h linux-4.1.6/include/linux/delay.h
---- linux-4.1.6.orig/include/linux/delay.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/delay.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/delay.h linux-4.1.10/include/linux/delay.h
+--- linux-4.1.10.orig/include/linux/delay.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/delay.h 2015-10-07 18:00:08.000000000 +0200
@@ -52,4 +52,10 @@
msleep(seconds * 1000);
}
@@ -9106,9 +18554,9 @@ diff -Nur linux-4.1.6.orig/include/linux/delay.h linux-4.1.6/include/linux/delay
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
-diff -Nur linux-4.1.6.orig/include/linux/ftrace_event.h linux-4.1.6/include/linux/ftrace_event.h
---- linux-4.1.6.orig/include/linux/ftrace_event.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/ftrace_event.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/ftrace_event.h linux-4.1.10/include/linux/ftrace_event.h
+--- linux-4.1.10.orig/include/linux/ftrace_event.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/ftrace_event.h 2015-10-07 18:00:08.000000000 +0200
@@ -66,6 +66,9 @@
unsigned char flags;
unsigned char preempt_count;
@@ -9119,9 +18567,9 @@ diff -Nur linux-4.1.6.orig/include/linux/ftrace_event.h linux-4.1.6/include/linu
};
#define FTRACE_MAX_EVENT \
-diff -Nur linux-4.1.6.orig/include/linux/highmem.h linux-4.1.6/include/linux/highmem.h
---- linux-4.1.6.orig/include/linux/highmem.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/highmem.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/highmem.h linux-4.1.10/include/linux/highmem.h
+--- linux-4.1.10.orig/include/linux/highmem.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/highmem.h 2015-10-07 18:00:08.000000000 +0200
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -9134,7 +18582,7 @@ diff -Nur linux-4.1.6.orig/include/linux/highmem.h linux-4.1.6/include/linux/hig
static inline void *kmap_atomic(struct page *page)
{
-+ preempt_disable();
++ preempt_disable_nort();
pagefault_disable();
return page_address(page);
}
@@ -9142,7 +18590,7 @@ diff -Nur linux-4.1.6.orig/include/linux/highmem.h linux-4.1.6/include/linux/hig
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
-+ preempt_enable();
++ preempt_enable_nort();
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
@@ -9202,9 +18650,9 @@ diff -Nur linux-4.1.6.orig/include/linux/highmem.h linux-4.1.6/include/linux/hig
#endif
}
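
For reference, the highmem.h change above swaps preempt_disable()/preempt_enable() around pagefault_disable() in the non-highmem kmap_atomic() inline for the *_nort() variants. A minimal caller-side sketch, assuming the usual RT convention that preempt_disable_nort() expands to preempt_disable() on non-RT kernels and to a plain barrier on PREEMPT_RT_FULL:

#include <linux/highmem.h>
#include <linux/string.h>

/* Classic atomic-kmap user: the API is unchanged by the hunk above;
 * only the implicit preemption behaviour differs between the trees. */
static void sample_zero_page(struct page *page)
{
	void *addr = kmap_atomic(page);	/* non-RT: preemption off; RT: still preemptible */

	memset(addr, 0, PAGE_SIZE);	/* must not rely on running atomically on RT */
	kunmap_atomic(addr);
}
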
-diff -Nur linux-4.1.6.orig/include/linux/hrtimer.h linux-4.1.6/include/linux/hrtimer.h
---- linux-4.1.6.orig/include/linux/hrtimer.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/hrtimer.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/hrtimer.h linux-4.1.10/include/linux/hrtimer.h
+--- linux-4.1.10.orig/include/linux/hrtimer.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/hrtimer.h 2015-10-07 18:00:08.000000000 +0200
@@ -111,6 +111,11 @@
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
@@ -9249,9 +18697,9 @@ diff -Nur linux-4.1.6.orig/include/linux/hrtimer.h linux-4.1.6/include/linux/hrt
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
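
For reference, the hrtimer.h hunk above adds per-timer state (e.g. the cb_entry list head) so that RT can defer callbacks out of hard interrupt context. A minimal sketch of an ordinary hrtimer user, whose code is unchanged but whose callback can no longer assume it runs with interrupts disabled on an RT kernel:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer sample_timer;

static enum hrtimer_restart sample_timer_fn(struct hrtimer *t)
{
	/* On mainline this runs in hard IRQ context; with the RT changes
	 * above it may be deferred, so it must tolerate that. */
	return HRTIMER_NORESTART;
}

static void sample_timer_arm(void)
{
	hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sample_timer.function = sample_timer_fn;
	hrtimer_start(&sample_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}
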
-diff -Nur linux-4.1.6.orig/include/linux/idr.h linux-4.1.6/include/linux/idr.h
---- linux-4.1.6.orig/include/linux/idr.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/idr.h 2015-09-08 23:49:06.534041686 +0200
+diff -Nur linux-4.1.10.orig/include/linux/idr.h linux-4.1.10/include/linux/idr.h
+--- linux-4.1.10.orig/include/linux/idr.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/idr.h 2015-10-07 18:00:08.000000000 +0200
@@ -95,10 +95,14 @@
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
@@ -9267,9 +18715,9 @@ diff -Nur linux-4.1.6.orig/include/linux/idr.h linux-4.1.6/include/linux/idr.h
/**
* idr_find - return pointer for given id
-diff -Nur linux-4.1.6.orig/include/linux/init_task.h linux-4.1.6/include/linux/init_task.h
---- linux-4.1.6.orig/include/linux/init_task.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/init_task.h 2015-09-08 23:49:06.542040801 +0200
+diff -Nur linux-4.1.10.orig/include/linux/init_task.h linux-4.1.10/include/linux/init_task.h
+--- linux-4.1.10.orig/include/linux/init_task.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/init_task.h 2015-10-07 18:00:08.000000000 +0200
@@ -147,9 +147,16 @@
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -9296,9 +18744,9 @@ diff -Nur linux-4.1.6.orig/include/linux/init_task.h linux-4.1.6/include/linux/i
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/interrupt.h
---- linux-4.1.6.orig/include/linux/interrupt.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/interrupt.h 2015-09-08 23:49:06.542040801 +0200
+diff -Nur linux-4.1.10.orig/include/linux/interrupt.h linux-4.1.10/include/linux/interrupt.h
+--- linux-4.1.10.orig/include/linux/interrupt.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/interrupt.h 2015-10-07 18:00:08.000000000 +0200
@@ -61,6 +61,7 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
@@ -9315,7 +18763,23 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
-@@ -184,7 +186,7 @@
+@@ -102,6 +104,7 @@
+ * @flags: flags (see IRQF_* above)
+ * @thread_fn: interrupt handler function for threaded interrupts
+ * @thread: thread pointer for threaded interrupts
++ * @secondary: pointer to secondary irqaction (force threading)
+ * @thread_flags: flags related to @thread
+ * @thread_mask: bitmask for keeping track of @thread activity
+ * @dir: pointer to the proc/irq/NN/name entry
+@@ -113,6 +116,7 @@
+ struct irqaction *next;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
++ struct irqaction *secondary;
+ unsigned int irq;
+ unsigned int flags;
+ unsigned long thread_flags;
+@@ -184,7 +188,7 @@
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
@@ -9324,7 +18788,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
#endif
extern void disable_irq_nosync(unsigned int irq);
-@@ -215,6 +217,7 @@
+@@ -215,6 +219,7 @@
unsigned int irq;
struct kref kref;
struct work_struct work;
@@ -9332,7 +18796,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
void (*release)(struct kref *ref);
};
-@@ -377,9 +380,13 @@
+@@ -377,9 +382,13 @@
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
@@ -9347,7 +18811,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
#endif
#ifndef __ARCH_SET_SOFTIRQ_PENDING
-@@ -435,9 +442,10 @@
+@@ -435,9 +444,10 @@
void (*action)(struct softirq_action *);
};
@@ -9359,7 +18823,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
-@@ -446,6 +454,9 @@
+@@ -446,6 +456,9 @@
__do_softirq();
}
#endif
@@ -9369,7 +18833,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-@@ -453,6 +464,7 @@
+@@ -453,6 +466,7 @@
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -9377,7 +18841,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -474,8 +486,9 @@
+@@ -474,8 +488,9 @@
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -9389,7 +18853,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -500,27 +513,36 @@
+@@ -500,27 +515,36 @@
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -9432,7 +18896,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -569,12 +591,7 @@
+@@ -569,12 +593,7 @@
smp_mb();
}
@@ -9446,7 +18910,7 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
-@@ -605,6 +622,12 @@
+@@ -605,6 +624,12 @@
tasklet_kill(&ttimer->tasklet);
}
@@ -9459,9 +18923,9 @@ diff -Nur linux-4.1.6.orig/include/linux/interrupt.h linux-4.1.6/include/linux/i
/*
* Autoprobing for irqs:
*
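
For reference, the interrupt.h hunk above adds a secondary irqaction so that forced IRQ threading can split a handler that registers both a primary and a threaded part. The driver-facing API is unchanged; a minimal sketch of such a registration:

#include <linux/interrupt.h>

/* Primary handler: quiesce the device and kick the thread.  Under forced
 * threading the core may carry the split in the "secondary" irqaction
 * introduced by the hunk above. */
static irqreturn_t sample_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t sample_thread_fn(int irq, void *dev_id)
{
	/* Heavy lifting in sleepable context. */
	return IRQ_HANDLED;
}

static int sample_setup_irq(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, sample_hardirq, sample_thread_fn,
				    IRQF_ONESHOT, "sample", dev_id);
}
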
-diff -Nur linux-4.1.6.orig/include/linux/io-mapping.h linux-4.1.6/include/linux/io-mapping.h
---- linux-4.1.6.orig/include/linux/io-mapping.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/io-mapping.h 2015-09-08 23:49:06.542040801 +0200
+diff -Nur linux-4.1.10.orig/include/linux/io-mapping.h linux-4.1.10/include/linux/io-mapping.h
+--- linux-4.1.10.orig/include/linux/io-mapping.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/io-mapping.h 2015-10-07 18:00:08.000000000 +0200
@@ -141,6 +141,7 @@
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
@@ -9478,9 +18942,9 @@ diff -Nur linux-4.1.6.orig/include/linux/io-mapping.h linux-4.1.6/include/linux/
}
/* Non-atomic map/unmap */
-diff -Nur linux-4.1.6.orig/include/linux/irqdesc.h linux-4.1.6/include/linux/irqdesc.h
---- linux-4.1.6.orig/include/linux/irqdesc.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/irqdesc.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/irqdesc.h linux-4.1.10/include/linux/irqdesc.h
+--- linux-4.1.10.orig/include/linux/irqdesc.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/irqdesc.h 2015-10-07 18:00:08.000000000 +0200
@@ -63,6 +63,7 @@
unsigned int irqs_unhandled;
atomic_t threads_handled;
@@ -9489,9 +18953,9 @@ diff -Nur linux-4.1.6.orig/include/linux/irqdesc.h linux-4.1.6/include/linux/irq
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
-diff -Nur linux-4.1.6.orig/include/linux/irqflags.h linux-4.1.6/include/linux/irqflags.h
---- linux-4.1.6.orig/include/linux/irqflags.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/irqflags.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/irqflags.h linux-4.1.10/include/linux/irqflags.h
+--- linux-4.1.10.orig/include/linux/irqflags.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/irqflags.h 2015-10-07 18:00:08.000000000 +0200
@@ -25,8 +25,6 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
@@ -9542,9 +19006,9 @@ diff -Nur linux-4.1.6.orig/include/linux/irqflags.h linux-4.1.6/include/linux/ir
+#endif
+
#endif
-diff -Nur linux-4.1.6.orig/include/linux/irq.h linux-4.1.6/include/linux/irq.h
---- linux-4.1.6.orig/include/linux/irq.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/irq.h 2015-09-08 23:49:06.542040801 +0200
+diff -Nur linux-4.1.10.orig/include/linux/irq.h linux-4.1.10/include/linux/irq.h
+--- linux-4.1.10.orig/include/linux/irq.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/irq.h 2015-10-07 18:00:08.000000000 +0200
@@ -72,6 +72,7 @@
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
@@ -9569,9 +19033,9 @@ diff -Nur linux-4.1.6.orig/include/linux/irq.h linux-4.1.6/include/linux/irq.h
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-diff -Nur linux-4.1.6.orig/include/linux/irq_work.h linux-4.1.6/include/linux/irq_work.h
---- linux-4.1.6.orig/include/linux/irq_work.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/irq_work.h 2015-09-08 23:49:06.542040801 +0200
+diff -Nur linux-4.1.10.orig/include/linux/irq_work.h linux-4.1.10/include/linux/irq_work.h
+--- linux-4.1.10.orig/include/linux/irq_work.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/irq_work.h 2015-10-07 18:00:08.000000000 +0200
@@ -16,6 +16,7 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
@@ -9580,9 +19044,9 @@ diff -Nur linux-4.1.6.orig/include/linux/irq_work.h linux-4.1.6/include/linux/ir
struct irq_work {
unsigned long flags;
-diff -Nur linux-4.1.6.orig/include/linux/jbd_common.h linux-4.1.6/include/linux/jbd_common.h
---- linux-4.1.6.orig/include/linux/jbd_common.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/jbd_common.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/jbd_common.h linux-4.1.10/include/linux/jbd_common.h
+--- linux-4.1.10.orig/include/linux/jbd_common.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/jbd_common.h 2015-10-07 18:00:08.000000000 +0200
@@ -15,32 +15,56 @@
static inline void jbd_lock_bh_state(struct buffer_head *bh)
@@ -9640,9 +19104,9 @@ diff -Nur linux-4.1.6.orig/include/linux/jbd_common.h linux-4.1.6/include/linux/
}
#endif
-diff -Nur linux-4.1.6.orig/include/linux/kdb.h linux-4.1.6/include/linux/kdb.h
---- linux-4.1.6.orig/include/linux/kdb.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/kdb.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/kdb.h linux-4.1.10/include/linux/kdb.h
+--- linux-4.1.10.orig/include/linux/kdb.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/kdb.h 2015-10-07 18:00:08.000000000 +0200
@@ -167,6 +167,7 @@
extern __printf(1, 2) int kdb_printf(const char *, ...);
typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
@@ -9659,9 +19123,9 @@ diff -Nur linux-4.1.6.orig/include/linux/kdb.h linux-4.1.6/include/linux/kdb.h
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-diff -Nur linux-4.1.6.orig/include/linux/kernel.h linux-4.1.6/include/linux/kernel.h
---- linux-4.1.6.orig/include/linux/kernel.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/kernel.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/kernel.h linux-4.1.10/include/linux/kernel.h
+--- linux-4.1.10.orig/include/linux/kernel.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/kernel.h 2015-10-07 18:00:08.000000000 +0200
@@ -188,6 +188,9 @@
*/
# define might_sleep() \
@@ -9698,9 +19162,9 @@ diff -Nur linux-4.1.6.orig/include/linux/kernel.h linux-4.1.6/include/linux/kern
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
-diff -Nur linux-4.1.6.orig/include/linux/kvm_host.h linux-4.1.6/include/linux/kvm_host.h
---- linux-4.1.6.orig/include/linux/kvm_host.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/kvm_host.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/kvm_host.h linux-4.1.10/include/linux/kvm_host.h
+--- linux-4.1.10.orig/include/linux/kvm_host.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/kvm_host.h 2015-10-07 18:00:08.000000000 +0200
@@ -230,7 +230,7 @@
int fpu_active;
@@ -9719,9 +19183,9 @@ diff -Nur linux-4.1.6.orig/include/linux/kvm_host.h linux-4.1.6/include/linux/kv
{
#ifdef __KVM_HAVE_ARCH_WQP
return vcpu->arch.wqp;
-diff -Nur linux-4.1.6.orig/include/linux/lglock.h linux-4.1.6/include/linux/lglock.h
---- linux-4.1.6.orig/include/linux/lglock.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/lglock.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/lglock.h linux-4.1.10/include/linux/lglock.h
+--- linux-4.1.10.orig/include/linux/lglock.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/lglock.h 2015-10-07 18:00:08.000000000 +0200
@@ -34,22 +34,39 @@
#endif
@@ -9777,9 +19241,9 @@ diff -Nur linux-4.1.6.orig/include/linux/lglock.h linux-4.1.6/include/linux/lglo
#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
-diff -Nur linux-4.1.6.orig/include/linux/list_bl.h linux-4.1.6/include/linux/list_bl.h
---- linux-4.1.6.orig/include/linux/list_bl.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/list_bl.h 2015-09-08 23:49:06.546040357 +0200
+diff -Nur linux-4.1.10.orig/include/linux/list_bl.h linux-4.1.10/include/linux/list_bl.h
+--- linux-4.1.10.orig/include/linux/list_bl.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/list_bl.h 2015-10-07 18:00:08.000000000 +0200
@@ -2,6 +2,7 @@
#define _LINUX_LIST_BL_H
@@ -9840,9 +19304,9 @@ diff -Nur linux-4.1.6.orig/include/linux/list_bl.h linux-4.1.6/include/linux/lis
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-diff -Nur linux-4.1.6.orig/include/linux/locallock.h linux-4.1.6/include/linux/locallock.h
---- linux-4.1.6.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/locallock.h 2015-09-08 23:49:06.550039912 +0200
+diff -Nur linux-4.1.10.orig/include/linux/locallock.h linux-4.1.10/include/linux/locallock.h
+--- linux-4.1.10.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/locallock.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,270 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
@@ -10114,9 +19578,9 @@ diff -Nur linux-4.1.6.orig/include/linux/locallock.h linux-4.1.6/include/linux/l
+#endif
+
+#endif
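
For reference, the new locallock.h header above introduces per-CPU "local locks" as a substitute for bare local_irq_save() sections protecting per-CPU data. Only the first lines of the file are visible in this hunk, so the identifiers below (DEFINE_LOCAL_IRQ_LOCK, local_lock_irqsave, local_unlock_irqrestore) are assumptions based on the customary RT API; a minimal usage sketch:

#include <linux/locallock.h>
#include <linux/percpu.h>

struct sample_pcpu {
	unsigned long hits;
};

static DEFINE_PER_CPU(struct sample_pcpu, sample_pcpu);
/* Assumed API: IRQ-off section on mainline, per-CPU sleeping lock on
 * PREEMPT_RT_FULL, so the critical section stays preemptible on RT. */
static DEFINE_LOCAL_IRQ_LOCK(sample_lock);

static void sample_account(void)
{
	unsigned long flags;

	/* Instead of local_irq_save(flags): */
	local_lock_irqsave(sample_lock, flags);
	this_cpu_inc(sample_pcpu.hits);
	local_unlock_irqrestore(sample_lock, flags);
}
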
-diff -Nur linux-4.1.6.orig/include/linux/mm_types.h linux-4.1.6/include/linux/mm_types.h
---- linux-4.1.6.orig/include/linux/mm_types.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/mm_types.h 2015-09-08 23:49:06.550039912 +0200
+diff -Nur linux-4.1.10.orig/include/linux/mm_types.h linux-4.1.10/include/linux/mm_types.h
+--- linux-4.1.10.orig/include/linux/mm_types.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/mm_types.h 2015-10-07 18:00:08.000000000 +0200
@@ -11,6 +11,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -10125,7 +19589,7 @@ diff -Nur linux-4.1.6.orig/include/linux/mm_types.h linux-4.1.6/include/linux/mm
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>
-@@ -462,6 +463,9 @@
+@@ -453,6 +454,9 @@
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
@@ -10135,9 +19599,552 @@ diff -Nur linux-4.1.6.orig/include/linux/mm_types.h linux-4.1.6/include/linux/mm
#ifdef CONFIG_X86_INTEL_MPX
/* address of the bounds directory */
void __user *bd_addr;
-diff -Nur linux-4.1.6.orig/include/linux/mutex.h linux-4.1.6/include/linux/mutex.h
---- linux-4.1.6.orig/include/linux/mutex.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/mutex.h 2015-09-08 23:49:06.774015077 +0200
+diff -Nur linux-4.1.10.orig/include/linux/mm_types.h.orig linux-4.1.10/include/linux/mm_types.h.orig
+--- linux-4.1.10.orig/include/linux/mm_types.h.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/mm_types.h.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,539 @@
++#ifndef _LINUX_MM_TYPES_H
++#define _LINUX_MM_TYPES_H
++
++#include <linux/auxvec.h>
++#include <linux/types.h>
++#include <linux/threads.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/completion.h>
++#include <linux/cpumask.h>
++#include <linux/uprobes.h>
++#include <linux/page-flags-layout.h>
++#include <asm/page.h>
++#include <asm/mmu.h>
++
++#ifndef AT_VECTOR_SIZE_ARCH
++#define AT_VECTOR_SIZE_ARCH 0
++#endif
++#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
++
++struct address_space;
++struct mem_cgroup;
++
++#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
++#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
++ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
++#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
++
++typedef void compound_page_dtor(struct page *);
++
++/*
++ * Each physical page in the system has a struct page associated with
++ * it to keep track of whatever it is we are using the page for at the
++ * moment. Note that we have no way to track which tasks are using
++ * a page, though if it is a pagecache page, rmap structures can tell us
++ * who is mapping it.
++ *
++ * The objects in struct page are organized in double word blocks in
++ * order to allows us to use atomic double word operations on portions
++ * of struct page. That is currently only used by slub but the arrangement
++ * allows the use of atomic double word operations on the flags/mapping
++ * and lru list pointers also.
++ */
++struct page {
++ /* First double word block */
++ unsigned long flags; /* Atomic flags, some possibly
++ * updated asynchronously */
++ union {
++ struct address_space *mapping; /* If low bit clear, points to
++ * inode address_space, or NULL.
++ * If page mapped as anonymous
++ * memory, low bit is set, and
++ * it points to anon_vma object:
++ * see PAGE_MAPPING_ANON below.
++ */
++ void *s_mem; /* slab first object */
++ };
++
++ /* Second double word */
++ struct {
++ union {
++ pgoff_t index; /* Our offset within mapping. */
++ void *freelist; /* sl[aou]b first free object */
++ };
++
++ union {
++#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
++ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
++ /* Used for cmpxchg_double in slub */
++ unsigned long counters;
++#else
++ /*
++ * Keep _count separate from slub cmpxchg_double data.
++ * As the rest of the double word is protected by
++ * slab_lock but _count is not.
++ */
++ unsigned counters;
++#endif
++
++ struct {
++
++ union {
++ /*
++ * Count of ptes mapped in
++ * mms, to show when page is
++ * mapped & limit reverse map
++ * searches.
++ *
++ * Used also for tail pages
++ * refcounting instead of
++ * _count. Tail pages cannot
++ * be mapped and keeping the
++ * tail page _count zero at
++ * all times guarantees
++ * get_page_unless_zero() will
++ * never succeed on tail
++ * pages.
++ */
++ atomic_t _mapcount;
++
++ struct { /* SLUB */
++ unsigned inuse:16;
++ unsigned objects:15;
++ unsigned frozen:1;
++ };
++ int units; /* SLOB */
++ };
++ atomic_t _count; /* Usage count, see below. */
++ };
++ unsigned int active; /* SLAB */
++ };
++ };
++
++ /* Third double word block */
++ union {
++ struct list_head lru; /* Pageout list, eg. active_list
++ * protected by zone->lru_lock !
++ * Can be used as a generic list
++ * by the page owner.
++ */
++ struct { /* slub per cpu partial pages */
++ struct page *next; /* Next partial slab */
++#ifdef CONFIG_64BIT
++ int pages; /* Nr of partial slabs left */
++ int pobjects; /* Approximate # of objects */
++#else
++ short int pages;
++ short int pobjects;
++#endif
++ };
++
++ struct slab *slab_page; /* slab fields */
++ struct rcu_head rcu_head; /* Used by SLAB
++ * when destroying via RCU
++ */
++ /* First tail page of compound page */
++ struct {
++ compound_page_dtor *compound_dtor;
++ unsigned long compound_order;
++ };
++
++#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
++ pgtable_t pmd_huge_pte; /* protected by page->ptl */
++#endif
++ };
++
++ /* Remainder is not double word aligned */
++ union {
++ unsigned long private; /* Mapping-private opaque data:
++ * usually used for buffer_heads
++ * if PagePrivate set; used for
++ * swp_entry_t if PageSwapCache;
++ * indicates order in the buddy
++ * system if PG_buddy is set.
++ */
++#if USE_SPLIT_PTE_PTLOCKS
++#if ALLOC_SPLIT_PTLOCKS
++ spinlock_t *ptl;
++#else
++ spinlock_t ptl;
++#endif
++#endif
++ struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
++ struct page *first_page; /* Compound tail pages */
++ };
++
++#ifdef CONFIG_MEMCG
++ struct mem_cgroup *mem_cgroup;
++#endif
++
++ /*
++ * On machines where all RAM is mapped into kernel address space,
++ * we can simply calculate the virtual address. On machines with
++ * highmem some memory is mapped into kernel virtual memory
++ * dynamically, so we need a place to store that address.
++ * Note that this field could be 16 bits on x86 ... ;)
++ *
++ * Architectures with slow multiplication can define
++ * WANT_PAGE_VIRTUAL in asm/page.h
++ */
++#if defined(WANT_PAGE_VIRTUAL)
++ void *virtual; /* Kernel virtual address (NULL if
++ not kmapped, ie. highmem) */
++#endif /* WANT_PAGE_VIRTUAL */
++
++#ifdef CONFIG_KMEMCHECK
++ /*
++ * kmemcheck wants to track the status of each byte in a page; this
++ * is a pointer to such a status block. NULL if not tracked.
++ */
++ void *shadow;
++#endif
++
++#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
++ int _last_cpupid;
++#endif
++}
++/*
++ * The struct page can be forced to be double word aligned so that atomic ops
++ * on double words work. The SLUB allocator can make use of such a feature.
++ */
++#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
++ __aligned(2 * sizeof(unsigned long))
++#endif
++;
++
++struct page_frag {
++ struct page *page;
++#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
++ __u32 offset;
++ __u32 size;
++#else
++ __u16 offset;
++ __u16 size;
++#endif
++};
++
++typedef unsigned long __nocast vm_flags_t;
++
++/*
++ * A region containing a mapping of a non-memory backed file under NOMMU
++ * conditions. These are held in a global tree and are pinned by the VMAs that
++ * map parts of them.
++ */
++struct vm_region {
++ struct rb_node vm_rb; /* link in global region tree */
++ vm_flags_t vm_flags; /* VMA vm_flags */
++ unsigned long vm_start; /* start address of region */
++ unsigned long vm_end; /* region initialised to here */
++ unsigned long vm_top; /* region allocated to here */
++ unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
++ struct file *vm_file; /* the backing file or NULL */
++
++ int vm_usage; /* region usage count (access under nommu_region_sem) */
++ bool vm_icache_flushed : 1; /* true if the icache has been flushed for
++ * this region */
++};
++
++/*
++ * This struct defines a memory VMM memory area. There is one of these
++ * per VM-area/task. A VM area is any part of the process virtual memory
++ * space that has a special rule for the page-fault handlers (ie a shared
++ * library, the executable area etc).
++ */
++struct vm_area_struct {
++ /* The first cache line has the info for VMA tree walking. */
++
++ unsigned long vm_start; /* Our start address within vm_mm. */
++ unsigned long vm_end; /* The first byte after our end address
++ within vm_mm. */
++
++ /* linked list of VM areas per task, sorted by address */
++ struct vm_area_struct *vm_next, *vm_prev;
++
++ struct rb_node vm_rb;
++
++ /*
++ * Largest free memory gap in bytes to the left of this VMA.
++ * Either between this VMA and vma->vm_prev, or between one of the
++ * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
++ * get_unmapped_area find a free area of the right size.
++ */
++ unsigned long rb_subtree_gap;
++
++ /* Second cache line starts here. */
++
++ struct mm_struct *vm_mm; /* The address space we belong to. */
++ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
++ unsigned long vm_flags; /* Flags, see mm.h. */
++
++ /*
++ * For areas with an address space and backing store,
++ * linkage into the address_space->i_mmap interval tree.
++ */
++ struct {
++ struct rb_node rb;
++ unsigned long rb_subtree_last;
++ } shared;
++
++ /*
++ * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
++ * list, after a COW of one of the file pages. A MAP_SHARED vma
++ * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
++ * or brk vma (with NULL file) can only be in an anon_vma list.
++ */
++ struct list_head anon_vma_chain; /* Serialized by mmap_sem &
++ * page_table_lock */
++ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
++
++ /* Function pointers to deal with this struct. */
++ const struct vm_operations_struct *vm_ops;
++
++ /* Information about our backing store: */
++ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
++ units, *not* PAGE_CACHE_SIZE */
++ struct file * vm_file; /* File we map to (can be NULL). */
++ void * vm_private_data; /* was vm_pte (shared mem) */
++
++#ifndef CONFIG_MMU
++ struct vm_region *vm_region; /* NOMMU mapping region */
++#endif
++#ifdef CONFIG_NUMA
++ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
++#endif
++};
++
++struct core_thread {
++ struct task_struct *task;
++ struct core_thread *next;
++};
++
++struct core_state {
++ atomic_t nr_threads;
++ struct core_thread dumper;
++ struct completion startup;
++};
++
++enum {
++ MM_FILEPAGES,
++ MM_ANONPAGES,
++ MM_SWAPENTS,
++ NR_MM_COUNTERS
++};
++
++#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
++#define SPLIT_RSS_COUNTING
++/* per-thread cached information, */
++struct task_rss_stat {
++ int events; /* for synchronization threshold */
++ int count[NR_MM_COUNTERS];
++};
++#endif /* USE_SPLIT_PTE_PTLOCKS */
++
++struct mm_rss_stat {
++ atomic_long_t count[NR_MM_COUNTERS];
++};
++
++struct kioctx_table;
++struct mm_struct {
++ struct vm_area_struct *mmap; /* list of VMAs */
++ struct rb_root mm_rb;
++ u32 vmacache_seqnum; /* per-thread vmacache */
++#ifdef CONFIG_MMU
++ unsigned long (*get_unmapped_area) (struct file *filp,
++ unsigned long addr, unsigned long len,
++ unsigned long pgoff, unsigned long flags);
++#endif
++ unsigned long mmap_base; /* base of mmap area */
++ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
++ unsigned long task_size; /* size of task vm space */
++ unsigned long highest_vm_end; /* highest vma end address */
++ pgd_t * pgd;
++ atomic_t mm_users; /* How many users with user space? */
++ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
++ atomic_long_t nr_ptes; /* PTE page table pages */
++#if CONFIG_PGTABLE_LEVELS > 2
++ atomic_long_t nr_pmds; /* PMD page table pages */
++#endif
++ int map_count; /* number of VMAs */
++
++ spinlock_t page_table_lock; /* Protects page tables and some counters */
++ struct rw_semaphore mmap_sem;
++
++ struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
++ * together off init_mm.mmlist, and are protected
++ * by mmlist_lock
++ */
++
++
++ unsigned long hiwater_rss; /* High-watermark of RSS usage */
++ unsigned long hiwater_vm; /* High-water virtual memory usage */
++
++ unsigned long total_vm; /* Total pages mapped */
++ unsigned long locked_vm; /* Pages that have PG_mlocked set */
++ unsigned long pinned_vm; /* Refcount permanently increased */
++ unsigned long shared_vm; /* Shared pages (files) */
++ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
++ unsigned long stack_vm; /* VM_GROWSUP/DOWN */
++ unsigned long def_flags;
++ unsigned long start_code, end_code, start_data, end_data;
++ unsigned long start_brk, brk, start_stack;
++ unsigned long arg_start, arg_end, env_start, env_end;
++
++ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
++
++ /*
++ * Special counters, in some configurations protected by the
++ * page_table_lock, in other configurations by being atomic.
++ */
++ struct mm_rss_stat rss_stat;
++
++ struct linux_binfmt *binfmt;
++
++ cpumask_var_t cpu_vm_mask_var;
++
++ /* Architecture-specific MM context */
++ mm_context_t context;
++
++ unsigned long flags; /* Must use atomic bitops to access the bits */
++
++ struct core_state *core_state; /* coredumping support */
++#ifdef CONFIG_AIO
++ spinlock_t ioctx_lock;
++ struct kioctx_table __rcu *ioctx_table;
++#endif
++#ifdef CONFIG_MEMCG
++ /*
++ * "owner" points to a task that is regarded as the canonical
++ * user/owner of this mm. All of the following must be true in
++ * order for it to be changed:
++ *
++ * current == mm->owner
++ * current->mm != mm
++ * new_owner->mm == mm
++ * new_owner->alloc_lock is held
++ */
++ struct task_struct __rcu *owner;
++#endif
++
++ /* store ref to file /proc/<pid>/exe symlink points to */
++ struct file __rcu *exe_file;
++#ifdef CONFIG_MMU_NOTIFIER
++ struct mmu_notifier_mm *mmu_notifier_mm;
++#endif
++#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
++ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
++#endif
++#ifdef CONFIG_CPUMASK_OFFSTACK
++ struct cpumask cpumask_allocation;
++#endif
++#ifdef CONFIG_NUMA_BALANCING
++ /*
++ * numa_next_scan is the next time that the PTEs will be marked
++ * pte_numa. NUMA hinting faults will gather statistics and migrate
++ * pages to new nodes if necessary.
++ */
++ unsigned long numa_next_scan;
++
++ /* Restart point for scanning and setting pte_numa */
++ unsigned long numa_scan_offset;
++
++ /* numa_scan_seq prevents two threads setting pte_numa */
++ int numa_scan_seq;
++#endif
++#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
++ /*
++ * An operation with batched TLB flushing is going on. Anything that
++ * can move process memory needs to flush the TLB when moving a
++ * PROT_NONE or PROT_NUMA mapped page.
++ */
++ bool tlb_flush_pending;
++#endif
++ struct uprobes_state uprobes_state;
++#ifdef CONFIG_X86_INTEL_MPX
++ /* address of the bounds directory */
++ void __user *bd_addr;
++#endif
++};
++
++static inline void mm_init_cpumask(struct mm_struct *mm)
++{
++#ifdef CONFIG_CPUMASK_OFFSTACK
++ mm->cpu_vm_mask_var = &mm->cpumask_allocation;
++#endif
++ cpumask_clear(mm->cpu_vm_mask_var);
++}
++
++/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
++static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
++{
++ return mm->cpu_vm_mask_var;
++}
++
++#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
++/*
++ * Memory barriers to keep this state in sync are graciously provided by
++ * the page table locks, outside of which no page table modifications happen.
++ * The barriers below prevent the compiler from re-ordering the instructions
++ * around the memory barriers that are already present in the code.
++ */
++static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
++{
++ barrier();
++ return mm->tlb_flush_pending;
++}
++static inline void set_tlb_flush_pending(struct mm_struct *mm)
++{
++ mm->tlb_flush_pending = true;
++
++ /*
++ * Guarantee that the tlb_flush_pending store does not leak into the
++ * critical section updating the page tables
++ */
++ smp_mb__before_spinlock();
++}
++/* Clearing is done after a TLB flush, which also provides a barrier. */
++static inline void clear_tlb_flush_pending(struct mm_struct *mm)
++{
++ barrier();
++ mm->tlb_flush_pending = false;
++}
++#else
++static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
++{
++ return false;
++}
++static inline void set_tlb_flush_pending(struct mm_struct *mm)
++{
++}
++static inline void clear_tlb_flush_pending(struct mm_struct *mm)
++{
++}
++#endif
++
++struct vm_special_mapping
++{
++ const char *name;
++ struct page **pages;
++};
++
++enum tlb_flush_reason {
++ TLB_FLUSH_ON_TASK_SWITCH,
++ TLB_REMOTE_SHOOTDOWN,
++ TLB_LOCAL_SHOOTDOWN,
++ TLB_LOCAL_MM_SHOOTDOWN,
++ NR_TLB_FLUSH_REASONS,
++};
++
++ /*
++ * A swap entry has to fit into a "unsigned long", as the entry is hidden
++ * in the "index" field of the swapper address space.
++ */
++typedef struct {
++ unsigned long val;
++} swp_entry_t;
++
++#endif /* _LINUX_MM_TYPES_H */
+diff -Nur linux-4.1.10.orig/include/linux/mutex.h linux-4.1.10/include/linux/mutex.h
+--- linux-4.1.10.orig/include/linux/mutex.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/mutex.h 2015-10-07 18:00:08.000000000 +0200
@@ -19,6 +19,17 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
@@ -10179,9 +20186,9 @@ diff -Nur linux-4.1.6.orig/include/linux/mutex.h linux-4.1.6/include/linux/mutex
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif /* __LINUX_MUTEX_H */
-diff -Nur linux-4.1.6.orig/include/linux/mutex_rt.h linux-4.1.6/include/linux/mutex_rt.h
---- linux-4.1.6.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/mutex_rt.h 2015-09-08 23:49:06.774015077 +0200
+diff -Nur linux-4.1.10.orig/include/linux/mutex_rt.h linux-4.1.10/include/linux/mutex_rt.h
+--- linux-4.1.10.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/mutex_rt.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,84 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
@@ -10267,9 +20274,9 @@ diff -Nur linux-4.1.6.orig/include/linux/mutex_rt.h linux-4.1.6/include/linux/mu
+} while (0)
+
+#endif
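
For reference, the mutex.h/mutex_rt.h hunks above route struct mutex onto the rtmutex implementation when PREEMPT_RT_FULL is set, while keeping the caller-facing API intact. Existing users therefore compile unchanged; a minimal sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(sample_mutex);
static int sample_state;

/* Unchanged caller code: on PREEMPT_RT_FULL these calls end up in the
 * rt_mutex-based _mutex_lock()/_mutex_unlock() declared above. */
static void sample_update(int v)
{
	mutex_lock(&sample_mutex);
	sample_state = v;
	mutex_unlock(&sample_mutex);
}
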
-diff -Nur linux-4.1.6.orig/include/linux/netdevice.h linux-4.1.6/include/linux/netdevice.h
---- linux-4.1.6.orig/include/linux/netdevice.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/netdevice.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/netdevice.h linux-4.1.10/include/linux/netdevice.h
+--- linux-4.1.10.orig/include/linux/netdevice.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/netdevice.h 2015-10-07 18:00:08.000000000 +0200
@@ -2469,6 +2469,7 @@
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
@@ -10278,9 +20285,9 @@ diff -Nur linux-4.1.6.orig/include/linux/netdevice.h linux-4.1.6/include/linux/n
};
-diff -Nur linux-4.1.6.orig/include/linux/netfilter/x_tables.h linux-4.1.6/include/linux/netfilter/x_tables.h
---- linux-4.1.6.orig/include/linux/netfilter/x_tables.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/netfilter/x_tables.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/netfilter/x_tables.h linux-4.1.10/include/linux/netfilter/x_tables.h
+--- linux-4.1.10.orig/include/linux/netfilter/x_tables.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/netfilter/x_tables.h 2015-10-07 18:00:08.000000000 +0200
@@ -3,6 +3,7 @@
@@ -10316,9 +20323,9 @@ diff -Nur linux-4.1.6.orig/include/linux/netfilter/x_tables.h linux-4.1.6/includ
}
/*
-diff -Nur linux-4.1.6.orig/include/linux/notifier.h linux-4.1.6/include/linux/notifier.h
---- linux-4.1.6.orig/include/linux/notifier.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/notifier.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/notifier.h linux-4.1.10/include/linux/notifier.h
+--- linux-4.1.10.orig/include/linux/notifier.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/notifier.h 2015-10-07 18:00:08.000000000 +0200
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
@@ -10398,9 +20405,9 @@ diff -Nur linux-4.1.6.orig/include/linux/notifier.h linux-4.1.6/include/linux/no
/* CPU notfiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
-diff -Nur linux-4.1.6.orig/include/linux/percpu.h linux-4.1.6/include/linux/percpu.h
---- linux-4.1.6.orig/include/linux/percpu.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/percpu.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/percpu.h linux-4.1.10/include/linux/percpu.h
+--- linux-4.1.10.orig/include/linux/percpu.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/percpu.h 2015-10-07 18:00:08.000000000 +0200
@@ -24,6 +24,35 @@
PERCPU_MODULE_RESERVE)
#endif
@@ -10437,9 +20444,9 @@ diff -Nur linux-4.1.6.orig/include/linux/percpu.h linux-4.1.6/include/linux/perc
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-diff -Nur linux-4.1.6.orig/include/linux/pid.h linux-4.1.6/include/linux/pid.h
---- linux-4.1.6.orig/include/linux/pid.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/pid.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/pid.h linux-4.1.10/include/linux/pid.h
+--- linux-4.1.10.orig/include/linux/pid.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/pid.h 2015-10-07 18:00:08.000000000 +0200
@@ -2,6 +2,7 @@
#define _LINUX_PID_H
@@ -10448,9 +20455,9 @@ diff -Nur linux-4.1.6.orig/include/linux/pid.h linux-4.1.6/include/linux/pid.h
enum pid_type
{
-diff -Nur linux-4.1.6.orig/include/linux/preempt.h linux-4.1.6/include/linux/preempt.h
---- linux-4.1.6.orig/include/linux/preempt.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/preempt.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/preempt.h linux-4.1.10/include/linux/preempt.h
+--- linux-4.1.10.orig/include/linux/preempt.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/preempt.h 2015-10-07 18:00:08.000000000 +0200
@@ -33,6 +33,20 @@
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -10554,9 +20561,9 @@ diff -Nur linux-4.1.6.orig/include/linux/preempt.h linux-4.1.6/include/linux/pre
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
-diff -Nur linux-4.1.6.orig/include/linux/preempt_mask.h linux-4.1.6/include/linux/preempt_mask.h
---- linux-4.1.6.orig/include/linux/preempt_mask.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/preempt_mask.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/preempt_mask.h linux-4.1.10/include/linux/preempt_mask.h
+--- linux-4.1.10.orig/include/linux/preempt_mask.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/preempt_mask.h 2015-10-07 18:00:08.000000000 +0200
@@ -44,16 +44,26 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
@@ -10594,9 +20601,9 @@ diff -Nur linux-4.1.6.orig/include/linux/preempt_mask.h linux-4.1.6/include/linu
/*
* Are we in NMI context?
-diff -Nur linux-4.1.6.orig/include/linux/printk.h linux-4.1.6/include/linux/printk.h
---- linux-4.1.6.orig/include/linux/printk.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/printk.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/printk.h linux-4.1.10/include/linux/printk.h
+--- linux-4.1.10.orig/include/linux/printk.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/printk.h 2015-10-07 18:00:08.000000000 +0200
@@ -115,9 +115,11 @@
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
@@ -10609,9 +20616,9 @@ diff -Nur linux-4.1.6.orig/include/linux/printk.h linux-4.1.6/include/linux/prin
#endif
typedef int(*printk_func_t)(const char *fmt, va_list args);
-diff -Nur linux-4.1.6.orig/include/linux/radix-tree.h linux-4.1.6/include/linux/radix-tree.h
---- linux-4.1.6.orig/include/linux/radix-tree.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/radix-tree.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/radix-tree.h linux-4.1.10/include/linux/radix-tree.h
+--- linux-4.1.10.orig/include/linux/radix-tree.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/radix-tree.h 2015-10-07 18:00:08.000000000 +0200
@@ -277,8 +277,13 @@
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
@@ -10635,9 +20642,9 @@ diff -Nur linux-4.1.6.orig/include/linux/radix-tree.h linux-4.1.6/include/linux/
}
/**
-diff -Nur linux-4.1.6.orig/include/linux/random.h linux-4.1.6/include/linux/random.h
---- linux-4.1.6.orig/include/linux/random.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/random.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/random.h linux-4.1.10/include/linux/random.h
+--- linux-4.1.10.orig/include/linux/random.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/random.h 2015-10-07 18:00:08.000000000 +0200
@@ -11,7 +11,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
@@ -10647,9 +20654,9 @@ diff -Nur linux-4.1.6.orig/include/linux/random.h linux-4.1.6/include/linux/rand
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
-diff -Nur linux-4.1.6.orig/include/linux/rcupdate.h linux-4.1.6/include/linux/rcupdate.h
---- linux-4.1.6.orig/include/linux/rcupdate.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/rcupdate.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rcupdate.h linux-4.1.10/include/linux/rcupdate.h
+--- linux-4.1.10.orig/include/linux/rcupdate.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/rcupdate.h 2015-10-07 18:00:08.000000000 +0200
@@ -167,6 +167,9 @@
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -10734,9 +20741,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rcupdate.h linux-4.1.6/include/linux/rc
local_bh_enable();
}
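
For reference, the rcupdate.h hunk above changes only how the _bh RCU flavour is implemented on RT (folding it onto the preemptible flavour plus local_bh_disable/enable); reader-side code keeps the same calls. A minimal reader sketch using the unchanged API:

#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct sample_entry {
	struct list_head node;
	int key;
	int val;
};

static LIST_HEAD(sample_list);	/* updates serialized elsewhere */

static int sample_lookup(int key)
{
	struct sample_entry *e;
	int val = -1;

	rcu_read_lock_bh();	/* only its expansion differs between the trees */
	list_for_each_entry_rcu(e, &sample_list, node) {
		if (e->key == key) {
			val = e->val;
			break;
		}
	}
	rcu_read_unlock_bh();
	return val;
}
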
-diff -Nur linux-4.1.6.orig/include/linux/rcutree.h linux-4.1.6/include/linux/rcutree.h
---- linux-4.1.6.orig/include/linux/rcutree.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/rcutree.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rcutree.h linux-4.1.10/include/linux/rcutree.h
+--- linux-4.1.10.orig/include/linux/rcutree.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/rcutree.h 2015-10-07 18:00:08.000000000 +0200
@@ -46,7 +46,11 @@
rcu_note_context_switch();
}
@@ -10789,9 +20796,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rcutree.h linux-4.1.6/include/linux/rcu
void rcu_all_qs(void);
#endif /* __LINUX_RCUTREE_H */
-diff -Nur linux-4.1.6.orig/include/linux/rtmutex.h linux-4.1.6/include/linux/rtmutex.h
---- linux-4.1.6.orig/include/linux/rtmutex.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/rtmutex.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rtmutex.h linux-4.1.10/include/linux/rtmutex.h
+--- linux-4.1.10.orig/include/linux/rtmutex.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/rtmutex.h 2015-10-07 18:00:08.000000000 +0200
@@ -14,10 +14,14 @@
#include <linux/linkage.h>
@@ -10865,9 +20872,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rtmutex.h linux-4.1.6/include/linux/rtm
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
-diff -Nur linux-4.1.6.orig/include/linux/rwlock_rt.h linux-4.1.6/include/linux/rwlock_rt.h
---- linux-4.1.6.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/rwlock_rt.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rwlock_rt.h linux-4.1.10/include/linux/rwlock_rt.h
+--- linux-4.1.10.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/rwlock_rt.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,99 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
@@ -10968,9 +20975,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rwlock_rt.h linux-4.1.6/include/linux/r
+ } while (0)
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/rwlock_types.h linux-4.1.6/include/linux/rwlock_types.h
---- linux-4.1.6.orig/include/linux/rwlock_types.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/rwlock_types.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rwlock_types.h linux-4.1.10/include/linux/rwlock_types.h
+--- linux-4.1.10.orig/include/linux/rwlock_types.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/rwlock_types.h 2015-10-07 18:00:08.000000000 +0200
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
@@ -10991,9 +20998,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rwlock_types.h linux-4.1.6/include/linu
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
#endif /* __LINUX_RWLOCK_TYPES_H */
-diff -Nur linux-4.1.6.orig/include/linux/rwlock_types_rt.h linux-4.1.6/include/linux/rwlock_types_rt.h
---- linux-4.1.6.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/rwlock_types_rt.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rwlock_types_rt.h linux-4.1.10/include/linux/rwlock_types_rt.h
+--- linux-4.1.10.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/rwlock_types_rt.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,33 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
@@ -11028,9 +21035,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rwlock_types_rt.h linux-4.1.6/include/l
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/rwsem.h linux-4.1.6/include/linux/rwsem.h
---- linux-4.1.6.orig/include/linux/rwsem.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/rwsem.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rwsem.h linux-4.1.10/include/linux/rwsem.h
+--- linux-4.1.10.orig/include/linux/rwsem.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/rwsem.h 2015-10-07 18:00:08.000000000 +0200
@@ -18,6 +18,10 @@
#include <linux/osq_lock.h>
#endif
@@ -11049,9 +21056,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rwsem.h linux-4.1.6/include/linux/rwsem
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* _LINUX_RWSEM_H */
-diff -Nur linux-4.1.6.orig/include/linux/rwsem_rt.h linux-4.1.6/include/linux/rwsem_rt.h
---- linux-4.1.6.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/rwsem_rt.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/rwsem_rt.h linux-4.1.10/include/linux/rwsem_rt.h
+--- linux-4.1.10.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/rwsem_rt.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,140 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
@@ -11193,9 +21200,9 @@ diff -Nur linux-4.1.6.orig/include/linux/rwsem_rt.h linux-4.1.6/include/linux/rw
+}
+#endif
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/sched.h linux-4.1.6/include/linux/sched.h
---- linux-4.1.6.orig/include/linux/sched.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/sched.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/sched.h linux-4.1.10/include/linux/sched.h
+--- linux-4.1.10.orig/include/linux/sched.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/sched.h 2015-10-07 18:00:08.000000000 +0200
@@ -26,6 +26,7 @@
#include <linux/nodemask.h>
#include <linux/mm_types.h>
@@ -11619,9 +21626,9 @@ diff -Nur linux-4.1.6.orig/include/linux/sched.h linux-4.1.6/include/linux/sched
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-diff -Nur linux-4.1.6.orig/include/linux/seqlock.h linux-4.1.6/include/linux/seqlock.h
---- linux-4.1.6.orig/include/linux/seqlock.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/seqlock.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/seqlock.h linux-4.1.10/include/linux/seqlock.h
+--- linux-4.1.10.orig/include/linux/seqlock.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/seqlock.h 2015-10-07 18:00:08.000000000 +0200
@@ -219,20 +219,30 @@
return __read_seqcount_retry(s, start);
}
@@ -11751,9 +21758,9 @@ diff -Nur linux-4.1.6.orig/include/linux/seqlock.h linux-4.1.6/include/linux/seq
spin_unlock_irqrestore(&sl->lock, flags);
}
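
For reference, the seqlock.h hunk above reworks the write side (taking and releasing sl->lock as visible in the last lines) so that sequence counters cooperate with preemptible writers on RT; the reader/writer API itself stays the same. A minimal sketch of that unchanged usage:

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(sample_seq);
static u64 sample_a, sample_b;

static void sample_store(u64 a, u64 b)
{
	write_seqlock(&sample_seq);	/* writer side, serialized by sl->lock */
	sample_a = a;
	sample_b = b;
	write_sequnlock(&sample_seq);
}

static u64 sample_load_sum(void)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqbegin(&sample_seq);
		sum = sample_a + sample_b;
	} while (read_seqretry(&sample_seq, seq));

	return sum;
}
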
-diff -Nur linux-4.1.6.orig/include/linux/signal.h linux-4.1.6/include/linux/signal.h
---- linux-4.1.6.orig/include/linux/signal.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/signal.h 2015-09-08 23:49:06.778014632 +0200
+diff -Nur linux-4.1.10.orig/include/linux/signal.h linux-4.1.10/include/linux/signal.h
+--- linux-4.1.10.orig/include/linux/signal.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/signal.h 2015-10-07 18:00:08.000000000 +0200
@@ -218,6 +218,7 @@
}
@@ -11762,9 +21769,9 @@ diff -Nur linux-4.1.6.orig/include/linux/signal.h linux-4.1.6/include/linux/sign
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
-diff -Nur linux-4.1.6.orig/include/linux/skbuff.h linux-4.1.6/include/linux/skbuff.h
---- linux-4.1.6.orig/include/linux/skbuff.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/skbuff.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/skbuff.h linux-4.1.10/include/linux/skbuff.h
+--- linux-4.1.10.orig/include/linux/skbuff.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/skbuff.h 2015-10-07 18:00:08.000000000 +0200
@@ -187,6 +187,7 @@
__u32 qlen;
@@ -11786,9 +21793,9 @@ diff -Nur linux-4.1.6.orig/include/linux/skbuff.h linux-4.1.6/include/linux/skbu
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
-diff -Nur linux-4.1.6.orig/include/linux/smp.h linux-4.1.6/include/linux/smp.h
---- linux-4.1.6.orig/include/linux/smp.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/smp.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/smp.h linux-4.1.10/include/linux/smp.h
+--- linux-4.1.10.orig/include/linux/smp.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/smp.h 2015-10-07 18:00:08.000000000 +0200
@@ -185,6 +185,9 @@
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
@@ -11799,9 +21806,9 @@ diff -Nur linux-4.1.6.orig/include/linux/smp.h linux-4.1.6/include/linux/smp.h
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
-diff -Nur linux-4.1.6.orig/include/linux/spinlock_api_smp.h linux-4.1.6/include/linux/spinlock_api_smp.h
---- linux-4.1.6.orig/include/linux/spinlock_api_smp.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/spinlock_api_smp.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock_api_smp.h linux-4.1.10/include/linux/spinlock_api_smp.h
+--- linux-4.1.10.orig/include/linux/spinlock_api_smp.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/spinlock_api_smp.h 2015-10-07 18:00:08.000000000 +0200
@@ -189,6 +189,8 @@
return 0;
}
@@ -11812,9 +21819,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock_api_smp.h linux-4.1.6/include/
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
-diff -Nur linux-4.1.6.orig/include/linux/spinlock.h linux-4.1.6/include/linux/spinlock.h
---- linux-4.1.6.orig/include/linux/spinlock.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/spinlock.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock.h linux-4.1.10/include/linux/spinlock.h
+--- linux-4.1.10.orig/include/linux/spinlock.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/spinlock.h 2015-10-07 18:00:08.000000000 +0200
@@ -281,7 +281,11 @@
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
@@ -11846,9 +21853,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock.h linux-4.1.6/include/linux/sp
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
-diff -Nur linux-4.1.6.orig/include/linux/spinlock_rt.h linux-4.1.6/include/linux/spinlock_rt.h
---- linux-4.1.6.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/spinlock_rt.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock_rt.h linux-4.1.10/include/linux/spinlock_rt.h
+--- linux-4.1.10.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/spinlock_rt.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,174 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
@@ -12024,9 +22031,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock_rt.h linux-4.1.6/include/linux
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/spinlock_types.h linux-4.1.6/include/linux/spinlock_types.h
---- linux-4.1.6.orig/include/linux/spinlock_types.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/spinlock_types.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock_types.h linux-4.1.10/include/linux/spinlock_types.h
+--- linux-4.1.10.orig/include/linux/spinlock_types.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/spinlock_types.h 2015-10-07 18:00:08.000000000 +0200
@@ -9,80 +9,15 @@
* Released under the General Public License (GPL).
*/
@@ -12115,9 +22122,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock_types.h linux-4.1.6/include/li
-#include <linux/rwlock_types.h>
-
#endif /* __LINUX_SPINLOCK_TYPES_H */
-diff -Nur linux-4.1.6.orig/include/linux/spinlock_types_nort.h linux-4.1.6/include/linux/spinlock_types_nort.h
---- linux-4.1.6.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/spinlock_types_nort.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock_types_nort.h linux-4.1.10/include/linux/spinlock_types_nort.h
+--- linux-4.1.10.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/spinlock_types_nort.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
@@ -12152,9 +22159,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock_types_nort.h linux-4.1.6/inclu
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/spinlock_types_raw.h linux-4.1.6/include/linux/spinlock_types_raw.h
---- linux-4.1.6.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/spinlock_types_raw.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock_types_raw.h linux-4.1.10/include/linux/spinlock_types_raw.h
+--- linux-4.1.10.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/spinlock_types_raw.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
@@ -12212,9 +22219,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock_types_raw.h linux-4.1.6/includ
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/spinlock_types_rt.h linux-4.1.6/include/linux/spinlock_types_rt.h
---- linux-4.1.6.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/spinlock_types_rt.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/spinlock_types_rt.h linux-4.1.10/include/linux/spinlock_types_rt.h
+--- linux-4.1.10.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/spinlock_types_rt.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
@@ -12267,9 +22274,9 @@ diff -Nur linux-4.1.6.orig/include/linux/spinlock_types_rt.h linux-4.1.6/include
+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/srcu.h linux-4.1.6/include/linux/srcu.h
---- linux-4.1.6.orig/include/linux/srcu.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/srcu.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/srcu.h linux-4.1.10/include/linux/srcu.h
+--- linux-4.1.10.orig/include/linux/srcu.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/srcu.h 2015-10-07 18:00:08.000000000 +0200
@@ -84,10 +84,10 @@
void process_srcu(struct work_struct *work);
@@ -12292,9 +22299,9 @@ diff -Nur linux-4.1.6.orig/include/linux/srcu.h linux-4.1.6/include/linux/srcu.h
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-diff -Nur linux-4.1.6.orig/include/linux/swap.h linux-4.1.6/include/linux/swap.h
---- linux-4.1.6.orig/include/linux/swap.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/swap.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/swap.h linux-4.1.10/include/linux/swap.h
+--- linux-4.1.10.orig/include/linux/swap.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/swap.h 2015-10-07 18:00:08.000000000 +0200
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
@@ -12321,9 +22328,9 @@ diff -Nur linux-4.1.6.orig/include/linux/swap.h linux-4.1.6/include/linux/swap.h
extern void lru_cache_add(struct page *);
extern void lru_cache_add_anon(struct page *page);
extern void lru_cache_add_file(struct page *page);
-diff -Nur linux-4.1.6.orig/include/linux/thread_info.h linux-4.1.6/include/linux/thread_info.h
---- linux-4.1.6.orig/include/linux/thread_info.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/thread_info.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/thread_info.h linux-4.1.10/include/linux/thread_info.h
+--- linux-4.1.10.orig/include/linux/thread_info.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/thread_info.h 2015-10-07 18:00:08.000000000 +0200
@@ -102,7 +102,17 @@
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -12343,9 +22350,9 @@ diff -Nur linux-4.1.6.orig/include/linux/thread_info.h linux-4.1.6/include/linux
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
-diff -Nur linux-4.1.6.orig/include/linux/timer.h linux-4.1.6/include/linux/timer.h
---- linux-4.1.6.orig/include/linux/timer.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/timer.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/timer.h linux-4.1.10/include/linux/timer.h
+--- linux-4.1.10.orig/include/linux/timer.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/timer.h 2015-10-07 18:00:08.000000000 +0200
@@ -241,7 +241,7 @@
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -12355,9 +22362,9 @@ diff -Nur linux-4.1.6.orig/include/linux/timer.h linux-4.1.6/include/linux/timer
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
-diff -Nur linux-4.1.6.orig/include/linux/uaccess.h linux-4.1.6/include/linux/uaccess.h
---- linux-4.1.6.orig/include/linux/uaccess.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/uaccess.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/uaccess.h linux-4.1.10/include/linux/uaccess.h
+--- linux-4.1.10.orig/include/linux/uaccess.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/uaccess.h 2015-10-07 18:00:08.000000000 +0200
@@ -1,21 +1,31 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
@@ -12436,9 +22443,9 @@ diff -Nur linux-4.1.6.orig/include/linux/uaccess.h linux-4.1.6/include/linux/uac
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-diff -Nur linux-4.1.6.orig/include/linux/uprobes.h linux-4.1.6/include/linux/uprobes.h
---- linux-4.1.6.orig/include/linux/uprobes.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/uprobes.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/uprobes.h linux-4.1.10/include/linux/uprobes.h
+--- linux-4.1.10.orig/include/linux/uprobes.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/uprobes.h 2015-10-07 18:00:08.000000000 +0200
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
@@ -12447,9 +22454,9 @@ diff -Nur linux-4.1.6.orig/include/linux/uprobes.h linux-4.1.6/include/linux/upr
struct vm_area_struct;
struct mm_struct;
-diff -Nur linux-4.1.6.orig/include/linux/vmstat.h linux-4.1.6/include/linux/vmstat.h
---- linux-4.1.6.orig/include/linux/vmstat.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/vmstat.h 2015-09-08 23:49:06.782014188 +0200
+diff -Nur linux-4.1.10.orig/include/linux/vmstat.h linux-4.1.10/include/linux/vmstat.h
+--- linux-4.1.10.orig/include/linux/vmstat.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/vmstat.h 2015-10-07 18:00:08.000000000 +0200
@@ -33,7 +33,9 @@
*/
static inline void __count_vm_event(enum vm_event_item item)
@@ -12470,9 +22477,9 @@ diff -Nur linux-4.1.6.orig/include/linux/vmstat.h linux-4.1.6/include/linux/vmst
}
static inline void count_vm_events(enum vm_event_item item, long delta)
-diff -Nur linux-4.1.6.orig/include/linux/wait.h linux-4.1.6/include/linux/wait.h
---- linux-4.1.6.orig/include/linux/wait.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/linux/wait.h 2015-09-08 23:49:06.786013744 +0200
+diff -Nur linux-4.1.10.orig/include/linux/wait.h linux-4.1.10/include/linux/wait.h
+--- linux-4.1.10.orig/include/linux/wait.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/linux/wait.h 2015-10-07 18:00:08.000000000 +0200
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <asm/current.h>
@@ -12481,9 +22488,9 @@ diff -Nur linux-4.1.6.orig/include/linux/wait.h linux-4.1.6/include/linux/wait.h
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-diff -Nur linux-4.1.6.orig/include/linux/wait-simple.h linux-4.1.6/include/linux/wait-simple.h
---- linux-4.1.6.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/wait-simple.h 2015-09-08 23:49:06.786013744 +0200
+diff -Nur linux-4.1.10.orig/include/linux/wait-simple.h linux-4.1.10/include/linux/wait-simple.h
+--- linux-4.1.10.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/wait-simple.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,207 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
@@ -12692,9 +22699,9 @@ diff -Nur linux-4.1.6.orig/include/linux/wait-simple.h linux-4.1.6/include/linux
+})
+
+#endif
-diff -Nur linux-4.1.6.orig/include/linux/work-simple.h linux-4.1.6/include/linux/work-simple.h
---- linux-4.1.6.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/linux/work-simple.h 2015-09-08 23:49:06.786013744 +0200
+diff -Nur linux-4.1.10.orig/include/linux/work-simple.h linux-4.1.10/include/linux/work-simple.h
+--- linux-4.1.10.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/linux/work-simple.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
@@ -12720,9 +22727,9 @@ diff -Nur linux-4.1.6.orig/include/linux/work-simple.h linux-4.1.6/include/linux
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
-diff -Nur linux-4.1.6.orig/include/net/dst.h linux-4.1.6/include/net/dst.h
---- linux-4.1.6.orig/include/net/dst.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/net/dst.h 2015-09-08 23:49:06.786013744 +0200
+diff -Nur linux-4.1.10.orig/include/net/dst.h linux-4.1.10/include/net/dst.h
+--- linux-4.1.10.orig/include/net/dst.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/net/dst.h 2015-10-07 18:00:08.000000000 +0200
@@ -403,7 +403,7 @@
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
@@ -12732,9 +22739,9 @@ diff -Nur linux-4.1.6.orig/include/net/dst.h linux-4.1.6/include/net/dst.h
if (dst->pending_confirm) {
unsigned long now = jiffies;
-diff -Nur linux-4.1.6.orig/include/net/neighbour.h linux-4.1.6/include/net/neighbour.h
---- linux-4.1.6.orig/include/net/neighbour.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/net/neighbour.h 2015-09-08 23:49:07.409944556 +0200
+diff -Nur linux-4.1.10.orig/include/net/neighbour.h linux-4.1.10/include/net/neighbour.h
+--- linux-4.1.10.orig/include/net/neighbour.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/net/neighbour.h 2015-10-07 18:00:08.000000000 +0200
@@ -445,7 +445,7 @@
}
#endif
@@ -12753,9 +22760,9 @@ diff -Nur linux-4.1.6.orig/include/net/neighbour.h linux-4.1.6/include/net/neigh
const struct net_device *dev)
{
unsigned int seq;
-diff -Nur linux-4.1.6.orig/include/net/netns/ipv4.h linux-4.1.6/include/net/netns/ipv4.h
---- linux-4.1.6.orig/include/net/netns/ipv4.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/include/net/netns/ipv4.h 2015-09-08 23:49:08.077870489 +0200
+diff -Nur linux-4.1.10.orig/include/net/netns/ipv4.h linux-4.1.10/include/net/netns/ipv4.h
+--- linux-4.1.10.orig/include/net/netns/ipv4.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/include/net/netns/ipv4.h 2015-10-07 18:00:08.000000000 +0200
@@ -69,6 +69,7 @@
int sysctl_icmp_echo_ignore_all;
@@ -12764,9 +22771,9 @@ diff -Nur linux-4.1.6.orig/include/net/netns/ipv4.h linux-4.1.6/include/net/netn
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-diff -Nur linux-4.1.6.orig/include/trace/events/hist.h linux-4.1.6/include/trace/events/hist.h
---- linux-4.1.6.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/trace/events/hist.h 2015-09-08 23:49:08.077870489 +0200
+diff -Nur linux-4.1.10.orig/include/trace/events/hist.h linux-4.1.10/include/trace/events/hist.h
+--- linux-4.1.10.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/trace/events/hist.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,72 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
@@ -12840,9 +22847,9 @@ diff -Nur linux-4.1.6.orig/include/trace/events/hist.h linux-4.1.6/include/trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
-diff -Nur linux-4.1.6.orig/include/trace/events/latency_hist.h linux-4.1.6/include/trace/events/latency_hist.h
---- linux-4.1.6.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/include/trace/events/latency_hist.h 2015-09-08 23:49:08.077870489 +0200
+diff -Nur linux-4.1.10.orig/include/trace/events/latency_hist.h linux-4.1.10/include/trace/events/latency_hist.h
+--- linux-4.1.10.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/include/trace/events/latency_hist.h 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,29 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
@@ -12873,9 +22880,9 @@ diff -Nur linux-4.1.6.orig/include/trace/events/latency_hist.h linux-4.1.6/inclu
+}
+
+#endif /* _LATENCY_HIST_H */
-diff -Nur linux-4.1.6.orig/init/Kconfig linux-4.1.6/init/Kconfig
---- linux-4.1.6.orig/init/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/init/Kconfig 2015-09-08 23:49:08.077870489 +0200
+diff -Nur linux-4.1.10.orig/init/Kconfig linux-4.1.10/init/Kconfig
+--- linux-4.1.10.orig/init/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/init/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -637,7 +637,7 @@
config RCU_FAST_NO_HZ
@@ -12927,9 +22934,9 @@ diff -Nur linux-4.1.6.orig/init/Kconfig linux-4.1.6/init/Kconfig
bool "SLUB per cpu partial cache"
help
Per cpu partial caches accellerate objects allocation and freeing
-diff -Nur linux-4.1.6.orig/init/main.c linux-4.1.6/init/main.c
---- linux-4.1.6.orig/init/main.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/init/main.c 2015-09-08 23:49:08.081870045 +0200
+diff -Nur linux-4.1.10.orig/init/main.c linux-4.1.10/init/main.c
+--- linux-4.1.10.orig/init/main.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/init/main.c 2015-10-07 18:00:08.000000000 +0200
@@ -525,6 +525,7 @@
setup_command_line(command_line);
setup_nr_cpu_ids();
@@ -12938,18 +22945,18 @@ diff -Nur linux-4.1.6.orig/init/main.c linux-4.1.6/init/main.c
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
build_all_zonelists(NULL, NULL);
-diff -Nur linux-4.1.6.orig/init/Makefile linux-4.1.6/init/Makefile
---- linux-4.1.6.orig/init/Makefile 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/init/Makefile 2015-09-08 23:49:08.077870489 +0200
+diff -Nur linux-4.1.10.orig/init/Makefile linux-4.1.10/init/Makefile
+--- linux-4.1.10.orig/init/Makefile 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/init/Makefile 2015-10-07 18:00:08.000000000 +0200
@@ -33,4 +33,4 @@
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-diff -Nur linux-4.1.6.orig/ipc/mqueue.c linux-4.1.6/ipc/mqueue.c
---- linux-4.1.6.orig/ipc/mqueue.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/ipc/mqueue.c 2015-09-08 23:49:08.081870045 +0200
+diff -Nur linux-4.1.10.orig/ipc/mqueue.c linux-4.1.10/ipc/mqueue.c
+--- linux-4.1.10.orig/ipc/mqueue.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/ipc/mqueue.c 2015-10-07 18:00:08.000000000 +0200
@@ -47,8 +47,7 @@
#define RECV 1
@@ -13090,9 +23097,9 @@ diff -Nur linux-4.1.6.orig/ipc/mqueue.c linux-4.1.6/ipc/mqueue.c
ret = 0;
}
if (ret == 0) {
-diff -Nur linux-4.1.6.orig/ipc/msg.c linux-4.1.6/ipc/msg.c
---- linux-4.1.6.orig/ipc/msg.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/ipc/msg.c 2015-09-08 23:49:08.081870045 +0200
+diff -Nur linux-4.1.10.orig/ipc/msg.c linux-4.1.10/ipc/msg.c
+--- linux-4.1.10.orig/ipc/msg.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/ipc/msg.c 2015-10-07 18:00:08.000000000 +0200
@@ -188,6 +188,12 @@
struct msg_receiver *msr, *t;
@@ -13142,10 +23149,10 @@ diff -Nur linux-4.1.6.orig/ipc/msg.c linux-4.1.6/ipc/msg.c
return 0;
}
-diff -Nur linux-4.1.6.orig/ipc/sem.c linux-4.1.6/ipc/sem.c
---- linux-4.1.6.orig/ipc/sem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/ipc/sem.c 2015-09-08 23:49:08.085869602 +0200
-@@ -680,6 +680,13 @@
+diff -Nur linux-4.1.10.orig/ipc/sem.c linux-4.1.10/ipc/sem.c
+--- linux-4.1.10.orig/ipc/sem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/ipc/sem.c 2015-10-07 18:00:08.000000000 +0200
+@@ -690,6 +690,13 @@
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
@@ -13159,7 +23166,7 @@ diff -Nur linux-4.1.6.orig/ipc/sem.c linux-4.1.6/ipc/sem.c
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
-@@ -691,6 +698,7 @@
+@@ -701,6 +708,7 @@
q->pid = error;
list_add_tail(&q->list, pt);
@@ -13167,7 +23174,7 @@ diff -Nur linux-4.1.6.orig/ipc/sem.c linux-4.1.6/ipc/sem.c
}
/**
-@@ -704,6 +712,7 @@
+@@ -714,6 +722,7 @@
*/
static void wake_up_sem_queue_do(struct list_head *pt)
{
@@ -13175,7 +23182,7 @@ diff -Nur linux-4.1.6.orig/ipc/sem.c linux-4.1.6/ipc/sem.c
struct sem_queue *q, *t;
int did_something;
-@@ -716,6 +725,7 @@
+@@ -726,6 +735,7 @@
}
if (did_something)
preempt_enable();
@@ -13183,10 +23190,10 @@ diff -Nur linux-4.1.6.orig/ipc/sem.c linux-4.1.6/ipc/sem.c
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-diff -Nur linux-4.1.6.orig/kernel/cgroup.c linux-4.1.6/kernel/cgroup.c
---- linux-4.1.6.orig/kernel/cgroup.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/cgroup.c 2015-09-08 23:49:08.089869160 +0200
-@@ -4421,10 +4421,10 @@
+diff -Nur linux-4.1.10.orig/kernel/cgroup.c linux-4.1.10/kernel/cgroup.c
+--- linux-4.1.10.orig/kernel/cgroup.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/cgroup.c 2015-10-07 18:00:08.000000000 +0200
+@@ -4422,10 +4422,10 @@
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -13199,7 +23206,7 @@ diff -Nur linux-4.1.6.orig/kernel/cgroup.c linux-4.1.6/kernel/cgroup.c
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4463,8 +4463,8 @@
+@@ -4464,8 +4464,8 @@
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -13210,7 +23217,7 @@ diff -Nur linux-4.1.6.orig/kernel/cgroup.c linux-4.1.6/kernel/cgroup.c
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5068,6 +5068,7 @@
+@@ -5069,6 +5069,7 @@
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
@@ -13218,9 +23225,5615 @@ diff -Nur linux-4.1.6.orig/kernel/cgroup.c linux-4.1.6/kernel/cgroup.c
/*
* Used to destroy pidlists and separate to serve as flush domain.
-diff -Nur linux-4.1.6.orig/kernel/cpu.c linux-4.1.6/kernel/cpu.c
---- linux-4.1.6.orig/kernel/cpu.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/cpu.c 2015-09-08 23:49:08.089869160 +0200
+diff -Nur linux-4.1.10.orig/kernel/cgroup.c.orig linux-4.1.10/kernel/cgroup.c.orig
+--- linux-4.1.10.orig/kernel/cgroup.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/kernel/cgroup.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,5602 @@
++/*
++ * Generic process-grouping system.
++ *
++ * Based originally on the cpuset system, extracted by Paul Menage
++ * Copyright (C) 2006 Google, Inc
++ *
++ * Notifications support
++ * Copyright (C) 2009 Nokia Corporation
++ * Author: Kirill A. Shutemov
++ *
++ * Copyright notices from the original cpuset code:
++ * --------------------------------------------------
++ * Copyright (C) 2003 BULL SA.
++ * Copyright (C) 2004-2006 Silicon Graphics, Inc.
++ *
++ * Portions derived from Patrick Mochel's sysfs code.
++ * sysfs is Copyright (c) 2001-3 Patrick Mochel
++ *
++ * 2003-10-10 Written by Simon Derr.
++ * 2003-10-22 Updates by Stephen Hemminger.
++ * 2004 May-July Rework by Paul Jackson.
++ * ---------------------------------------------------
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of the Linux
++ * distribution for more details.
++ */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
++#include <linux/cgroup.h>
++#include <linux/cred.h>
++#include <linux/ctype.h>
++#include <linux/errno.h>
++#include <linux/init_task.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/magic.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/mount.h>
++#include <linux/pagemap.h>
++#include <linux/proc_fs.h>
++#include <linux/rcupdate.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/rwsem.h>
++#include <linux/string.h>
++#include <linux/sort.h>
++#include <linux/kmod.h>
++#include <linux/delayacct.h>
++#include <linux/cgroupstats.h>
++#include <linux/hashtable.h>
++#include <linux/pid_namespace.h>
++#include <linux/idr.h>
++#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
++#include <linux/kthread.h>
++#include <linux/delay.h>
++
++#include <linux/atomic.h>
++
++/*
++ * pidlists linger the following amount before being destroyed. The goal
++ * is avoiding frequent destruction in the middle of consecutive read calls
++ * Expiring in the middle is a performance problem not a correctness one.
++ * 1 sec should be enough.
++ */
++#define CGROUP_PIDLIST_DESTROY_DELAY HZ
++
++#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
++ MAX_CFTYPE_NAME + 2)
++
++/*
++ * cgroup_mutex is the master lock. Any modification to cgroup or its
++ * hierarchy must be performed while holding it.
++ *
++ * css_set_rwsem protects task->cgroups pointer, the list of css_set
++ * objects, and the chain of tasks off each css_set.
++ *
++ * These locks are exported if CONFIG_PROVE_RCU so that accessors in
++ * cgroup.h can use them for lockdep annotations.
++ */
++#ifdef CONFIG_PROVE_RCU
++DEFINE_MUTEX(cgroup_mutex);
++DECLARE_RWSEM(css_set_rwsem);
++EXPORT_SYMBOL_GPL(cgroup_mutex);
++EXPORT_SYMBOL_GPL(css_set_rwsem);
++#else
++static DEFINE_MUTEX(cgroup_mutex);
++static DECLARE_RWSEM(css_set_rwsem);
++#endif
++
++/*
++ * Protects cgroup_idr and css_idr so that IDs can be released without
++ * grabbing cgroup_mutex.
++ */
++static DEFINE_SPINLOCK(cgroup_idr_lock);
++
++/*
++ * Protects cgroup_subsys->release_agent_path. Modifying it also requires
++ * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
++ */
++static DEFINE_SPINLOCK(release_agent_path_lock);
++
++#define cgroup_assert_mutex_or_rcu_locked() \
++ rcu_lockdep_assert(rcu_read_lock_held() || \
++ lockdep_is_held(&cgroup_mutex), \
++ "cgroup_mutex or RCU read lock required");
++
++/*
++ * cgroup destruction makes heavy use of work items and there can be a lot
++ * of concurrent destructions. Use a separate workqueue so that cgroup
++ * destruction work items don't end up filling up max_active of system_wq
++ * which may lead to deadlock.
++ */
++static struct workqueue_struct *cgroup_destroy_wq;
++
++/*
++ * pidlist destructions need to be flushed on cgroup destruction. Use a
++ * separate workqueue as flush domain.
++ */
++static struct workqueue_struct *cgroup_pidlist_destroy_wq;
++
++/* generate an array of cgroup subsystem pointers */
++#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
++static struct cgroup_subsys *cgroup_subsys[] = {
++#include <linux/cgroup_subsys.h>
++};
++#undef SUBSYS
++
++/* array of cgroup subsystem names */
++#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
++static const char *cgroup_subsys_name[] = {
++#include <linux/cgroup_subsys.h>
++};
++#undef SUBSYS
++
++/*
++ * The default hierarchy, reserved for the subsystems that are otherwise
++ * unattached - it never has more than a single cgroup, and all tasks are
++ * part of that cgroup.
++ */
++struct cgroup_root cgrp_dfl_root;
++
++/*
++ * The default hierarchy always exists but is hidden until mounted for the
++ * first time. This is for backward compatibility.
++ */
++static bool cgrp_dfl_root_visible;
++
++/*
++ * Set by the boot param of the same name and makes subsystems with NULL
++ * ->dfl_files to use ->legacy_files on the default hierarchy.
++ */
++static bool cgroup_legacy_files_on_dfl;
++
++/* some controllers are not supported in the default hierarchy */
++static unsigned int cgrp_dfl_root_inhibit_ss_mask;
++
++/* The list of hierarchy roots */
++
++static LIST_HEAD(cgroup_roots);
++static int cgroup_root_count;
++
++/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
++static DEFINE_IDR(cgroup_hierarchy_idr);
++
++/*
++ * Assign a monotonically increasing serial number to csses. It guarantees
++ * cgroups with bigger numbers are newer than those with smaller numbers.
++ * Also, as csses are always appended to the parent's ->children list, it
++ * guarantees that sibling csses are always sorted in the ascending serial
++ * number order on the list. Protected by cgroup_mutex.
++ */
++static u64 css_serial_nr_next = 1;
++
++/* This flag indicates whether tasks in the fork and exit paths should
++ * check for fork/exit handlers to call. This avoids us having to do
++ * extra work in the fork/exit path if none of the subsystems need to
++ * be called.
++ */
++static int need_forkexit_callback __read_mostly;
++
++static struct cftype cgroup_dfl_base_files[];
++static struct cftype cgroup_legacy_base_files[];
++
++static int rebind_subsystems(struct cgroup_root *dst_root,
++ unsigned int ss_mask);
++static int cgroup_destroy_locked(struct cgroup *cgrp);
++static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
++ bool visible);
++static void css_release(struct percpu_ref *ref);
++static void kill_css(struct cgroup_subsys_state *css);
++static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
++ bool is_add);
++
++/* IDR wrappers which synchronize using cgroup_idr_lock */
++static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
++ gfp_t gfp_mask)
++{
++ int ret;
++
++ idr_preload(gfp_mask);
++ spin_lock_bh(&cgroup_idr_lock);
++ ret = idr_alloc(idr, ptr, start, end, gfp_mask);
++ spin_unlock_bh(&cgroup_idr_lock);
++ idr_preload_end();
++ return ret;
++}
++
++static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
++{
++ void *ret;
++
++ spin_lock_bh(&cgroup_idr_lock);
++ ret = idr_replace(idr, ptr, id);
++ spin_unlock_bh(&cgroup_idr_lock);
++ return ret;
++}
++
++static void cgroup_idr_remove(struct idr *idr, int id)
++{
++ spin_lock_bh(&cgroup_idr_lock);
++ idr_remove(idr, id);
++ spin_unlock_bh(&cgroup_idr_lock);
++}
++
++static struct cgroup *cgroup_parent(struct cgroup *cgrp)
++{
++ struct cgroup_subsys_state *parent_css = cgrp->self.parent;
++
++ if (parent_css)
++ return container_of(parent_css, struct cgroup, self);
++ return NULL;
++}
++
++/**
++ * cgroup_css - obtain a cgroup's css for the specified subsystem
++ * @cgrp: the cgroup of interest
++ * @ss: the subsystem of interest (%NULL returns @cgrp->self)
++ *
++ * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
++ * function must be called either under cgroup_mutex or rcu_read_lock() and
++ * the caller is responsible for pinning the returned css if it wants to
++ * keep accessing it outside the said locks. This function may return
++ * %NULL if @cgrp doesn't have @subsys_id enabled.
++ */
++static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
++ struct cgroup_subsys *ss)
++{
++ if (ss)
++ return rcu_dereference_check(cgrp->subsys[ss->id],
++ lockdep_is_held(&cgroup_mutex));
++ else
++ return &cgrp->self;
++}
++
++/**
++ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
++ * @cgrp: the cgroup of interest
++ * @ss: the subsystem of interest (%NULL returns @cgrp->self)
++ *
++ * Similar to cgroup_css() but returns the effctive css, which is defined
++ * as the matching css of the nearest ancestor including self which has @ss
++ * enabled. If @ss is associated with the hierarchy @cgrp is on, this
++ * function is guaranteed to return non-NULL css.
++ */
++static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
++ struct cgroup_subsys *ss)
++{
++ lockdep_assert_held(&cgroup_mutex);
++
++ if (!ss)
++ return &cgrp->self;
++
++ if (!(cgrp->root->subsys_mask & (1 << ss->id)))
++ return NULL;
++
++ /*
++ * This function is used while updating css associations and thus
++ * can't test the csses directly. Use ->child_subsys_mask.
++ */
++ while (cgroup_parent(cgrp) &&
++ !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
++ cgrp = cgroup_parent(cgrp);
++
++ return cgroup_css(cgrp, ss);
++}
++
++/**
++ * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
++ * @cgrp: the cgroup of interest
++ * @ss: the subsystem of interest
++ *
++ * Find and get the effective css of @cgrp for @ss. The effective css is
++ * defined as the matching css of the nearest ancestor including self which
++ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
++ * the root css is returned, so this function always returns a valid css.
++ * The returned css must be put using css_put().
++ */
++struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
++ struct cgroup_subsys *ss)
++{
++ struct cgroup_subsys_state *css;
++
++ rcu_read_lock();
++
++ do {
++ css = cgroup_css(cgrp, ss);
++
++ if (css && css_tryget_online(css))
++ goto out_unlock;
++ cgrp = cgroup_parent(cgrp);
++ } while (cgrp);
++
++ css = init_css_set.subsys[ss->id];
++ css_get(css);
++out_unlock:
++ rcu_read_unlock();
++ return css;
++}
++
++/* convenient tests for these bits */
++static inline bool cgroup_is_dead(const struct cgroup *cgrp)
++{
++ return !(cgrp->self.flags & CSS_ONLINE);
++}
++
++struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
++{
++ struct cgroup *cgrp = of->kn->parent->priv;
++ struct cftype *cft = of_cft(of);
++
++ /*
++ * This is open and unprotected implementation of cgroup_css().
++ * seq_css() is only called from a kernfs file operation which has
++ * an active reference on the file. Because all the subsystem
++ * files are drained before a css is disassociated with a cgroup,
++ * the matching css from the cgroup's subsys table is guaranteed to
++ * be and stay valid until the enclosing operation is complete.
++ */
++ if (cft->ss)
++ return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
++ else
++ return &cgrp->self;
++}
++EXPORT_SYMBOL_GPL(of_css);
++
++/**
++ * cgroup_is_descendant - test ancestry
++ * @cgrp: the cgroup to be tested
++ * @ancestor: possible ancestor of @cgrp
++ *
++ * Test whether @cgrp is a descendant of @ancestor. It also returns %true
++ * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
++ * and @ancestor are accessible.
++ */
++bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor)
++{
++ while (cgrp) {
++ if (cgrp == ancestor)
++ return true;
++ cgrp = cgroup_parent(cgrp);
++ }
++ return false;
++}
++
++static int notify_on_release(const struct cgroup *cgrp)
++{
++ return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
++}
++
++/**
++ * for_each_css - iterate all css's of a cgroup
++ * @css: the iteration cursor
++ * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
++ * @cgrp: the target cgroup to iterate css's of
++ *
++ * Should be called under cgroup_[tree_]mutex.
++ */
++#define for_each_css(css, ssid, cgrp) \
++ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
++ if (!((css) = rcu_dereference_check( \
++ (cgrp)->subsys[(ssid)], \
++ lockdep_is_held(&cgroup_mutex)))) { } \
++ else
++
++/**
++ * for_each_e_css - iterate all effective css's of a cgroup
++ * @css: the iteration cursor
++ * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
++ * @cgrp: the target cgroup to iterate css's of
++ *
++ * Should be called under cgroup_[tree_]mutex.
++ */
++#define for_each_e_css(css, ssid, cgrp) \
++ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
++ if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
++ ; \
++ else
++
++/**
++ * for_each_subsys - iterate all enabled cgroup subsystems
++ * @ss: the iteration cursor
++ * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
++ */
++#define for_each_subsys(ss, ssid) \
++ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \
++ (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
++
++/* iterate across the hierarchies */
++#define for_each_root(root) \
++ list_for_each_entry((root), &cgroup_roots, root_list)
++
++/* iterate over child cgrps, lock should be held throughout iteration */
++#define cgroup_for_each_live_child(child, cgrp) \
++ list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
++ if (({ lockdep_assert_held(&cgroup_mutex); \
++ cgroup_is_dead(child); })) \
++ ; \
++ else
++
++static void cgroup_release_agent(struct work_struct *work);
++static void check_for_release(struct cgroup *cgrp);
++
++/*
++ * A cgroup can be associated with multiple css_sets as different tasks may
++ * belong to different cgroups on different hierarchies. In the other
++ * direction, a css_set is naturally associated with multiple cgroups.
++ * This M:N relationship is represented by the following link structure
++ * which exists for each association and allows traversing the associations
++ * from both sides.
++ */
++struct cgrp_cset_link {
++ /* the cgroup and css_set this link associates */
++ struct cgroup *cgrp;
++ struct css_set *cset;
++
++ /* list of cgrp_cset_links anchored at cgrp->cset_links */
++ struct list_head cset_link;
++
++ /* list of cgrp_cset_links anchored at css_set->cgrp_links */
++ struct list_head cgrp_link;
++};
++
++/*
++ * The default css_set - used by init and its children prior to any
++ * hierarchies being mounted. It contains a pointer to the root state
++ * for each subsystem. Also used to anchor the list of css_sets. Not
++ * reference-counted, to improve performance when child cgroups
++ * haven't been created.
++ */
++struct css_set init_css_set = {
++ .refcount = ATOMIC_INIT(1),
++ .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
++ .tasks = LIST_HEAD_INIT(init_css_set.tasks),
++ .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
++ .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
++ .mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
++};
++
++static int css_set_count = 1; /* 1 for init_css_set */
++
++/**
++ * cgroup_update_populated - updated populated count of a cgroup
++ * @cgrp: the target cgroup
++ * @populated: inc or dec populated count
++ *
++ * @cgrp is either getting the first task (css_set) or losing the last.
++ * Update @cgrp->populated_cnt accordingly. The count is propagated
++ * towards root so that a given cgroup's populated_cnt is zero iff the
++ * cgroup and all its descendants are empty.
++ *
++ * @cgrp's interface file "cgroup.populated" is zero if
++ * @cgrp->populated_cnt is zero and 1 otherwise. When @cgrp->populated_cnt
++ * changes from or to zero, userland is notified that the content of the
++ * interface file has changed. This can be used to detect when @cgrp and
++ * its descendants become populated or empty.
++ */
++static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
++{
++ lockdep_assert_held(&css_set_rwsem);
++
++ do {
++ bool trigger;
++
++ if (populated)
++ trigger = !cgrp->populated_cnt++;
++ else
++ trigger = !--cgrp->populated_cnt;
++
++ if (!trigger)
++ break;
++
++ if (cgrp->populated_kn)
++ kernfs_notify(cgrp->populated_kn);
++ cgrp = cgroup_parent(cgrp);
++ } while (cgrp);
++}
++
++/*
++ * hash table for cgroup groups. This improves the performance to find
++ * an existing css_set. This hash doesn't (currently) take into
++ * account cgroups in empty hierarchies.
++ */
++#define CSS_SET_HASH_BITS 7
++static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
++
++static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
++{
++ unsigned long key = 0UL;
++ struct cgroup_subsys *ss;
++ int i;
++
++ for_each_subsys(ss, i)
++ key += (unsigned long)css[i];
++ key = (key >> 16) ^ key;
++
++ return key;
++}
++
++static void put_css_set_locked(struct css_set *cset)
++{
++ struct cgrp_cset_link *link, *tmp_link;
++ struct cgroup_subsys *ss;
++ int ssid;
++
++ lockdep_assert_held(&css_set_rwsem);
++
++ if (!atomic_dec_and_test(&cset->refcount))
++ return;
++
++ /* This css_set is dead. unlink it and release cgroup refcounts */
++ for_each_subsys(ss, ssid)
++ list_del(&cset->e_cset_node[ssid]);
++ hash_del(&cset->hlist);
++ css_set_count--;
++
++ list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
++ struct cgroup *cgrp = link->cgrp;
++
++ list_del(&link->cset_link);
++ list_del(&link->cgrp_link);
++
++ /* @cgrp can't go away while we're holding css_set_rwsem */
++ if (list_empty(&cgrp->cset_links)) {
++ cgroup_update_populated(cgrp, false);
++ check_for_release(cgrp);
++ }
++
++ kfree(link);
++ }
++
++ kfree_rcu(cset, rcu_head);
++}
++
++static void put_css_set(struct css_set *cset)
++{
++ /*
++ * Ensure that the refcount doesn't hit zero while any readers
++ * can see it. Similar to atomic_dec_and_lock(), but for an
++ * rwlock
++ */
++ if (atomic_add_unless(&cset->refcount, -1, 1))
++ return;
++
++ down_write(&css_set_rwsem);
++ put_css_set_locked(cset);
++ up_write(&css_set_rwsem);
++}
++
++/*
++ * refcounted get/put for css_set objects
++ */
++static inline void get_css_set(struct css_set *cset)
++{
++ atomic_inc(&cset->refcount);
++}
++
++/**
++ * compare_css_sets - helper function for find_existing_css_set().
++ * @cset: candidate css_set being tested
++ * @old_cset: existing css_set for a task
++ * @new_cgrp: cgroup that's being entered by the task
++ * @template: desired set of css pointers in css_set (pre-calculated)
++ *
++ * Returns true if "cset" matches "old_cset" except for the hierarchy
++ * which "new_cgrp" belongs to, for which it should match "new_cgrp".
++ */
++static bool compare_css_sets(struct css_set *cset,
++ struct css_set *old_cset,
++ struct cgroup *new_cgrp,
++ struct cgroup_subsys_state *template[])
++{
++ struct list_head *l1, *l2;
++
++ /*
++ * On the default hierarchy, there can be csets which are
++ * associated with the same set of cgroups but different csses.
++ * Let's first ensure that csses match.
++ */
++ if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
++ return false;
++
++ /*
++ * Compare cgroup pointers in order to distinguish between
++ * different cgroups in hierarchies. As different cgroups may
++ * share the same effective css, this comparison is always
++ * necessary.
++ */
++ l1 = &cset->cgrp_links;
++ l2 = &old_cset->cgrp_links;
++ while (1) {
++ struct cgrp_cset_link *link1, *link2;
++ struct cgroup *cgrp1, *cgrp2;
++
++ l1 = l1->next;
++ l2 = l2->next;
++ /* See if we reached the end - both lists are equal length. */
++ if (l1 == &cset->cgrp_links) {
++ BUG_ON(l2 != &old_cset->cgrp_links);
++ break;
++ } else {
++ BUG_ON(l2 == &old_cset->cgrp_links);
++ }
++ /* Locate the cgroups associated with these links. */
++ link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
++ link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
++ cgrp1 = link1->cgrp;
++ cgrp2 = link2->cgrp;
++ /* Hierarchies should be linked in the same order. */
++ BUG_ON(cgrp1->root != cgrp2->root);
++
++ /*
++ * If this hierarchy is the hierarchy of the cgroup
++ * that's changing, then we need to check that this
++ * css_set points to the new cgroup; if it's any other
++ * hierarchy, then this css_set should point to the
++ * same cgroup as the old css_set.
++ */
++ if (cgrp1->root == new_cgrp->root) {
++ if (cgrp1 != new_cgrp)
++ return false;
++ } else {
++ if (cgrp1 != cgrp2)
++ return false;
++ }
++ }
++ return true;
++}
++
++/**
++ * find_existing_css_set - init css array and find the matching css_set
++ * @old_cset: the css_set that we're using before the cgroup transition
++ * @cgrp: the cgroup that we're moving into
++ * @template: out param for the new set of csses, should be clear on entry
++ */
++static struct css_set *find_existing_css_set(struct css_set *old_cset,
++ struct cgroup *cgrp,
++ struct cgroup_subsys_state *template[])
++{
++ struct cgroup_root *root = cgrp->root;
++ struct cgroup_subsys *ss;
++ struct css_set *cset;
++ unsigned long key;
++ int i;
++
++ /*
++ * Build the set of subsystem state objects that we want to see in the
++ * new css_set. while subsystems can change globally, the entries here
++ * won't change, so no need for locking.
++ */
++ for_each_subsys(ss, i) {
++ if (root->subsys_mask & (1UL << i)) {
++ /*
++ * @ss is in this hierarchy, so we want the
++ * effective css from @cgrp.
++ */
++ template[i] = cgroup_e_css(cgrp, ss);
++ } else {
++ /*
++ * @ss is not in this hierarchy, so we don't want
++ * to change the css.
++ */
++ template[i] = old_cset->subsys[i];
++ }
++ }
++
++ key = css_set_hash(template);
++ hash_for_each_possible(css_set_table, cset, hlist, key) {
++ if (!compare_css_sets(cset, old_cset, cgrp, template))
++ continue;
++
++ /* This css_set matches what we need */
++ return cset;
++ }
++
++ /* No existing cgroup group matched */
++ return NULL;
++}
++
++static void free_cgrp_cset_links(struct list_head *links_to_free)
++{
++ struct cgrp_cset_link *link, *tmp_link;
++
++ list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
++ list_del(&link->cset_link);
++ kfree(link);
++ }
++}
++
++/**
++ * allocate_cgrp_cset_links - allocate cgrp_cset_links
++ * @count: the number of links to allocate
++ * @tmp_links: list_head the allocated links are put on
++ *
++ * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
++ * through ->cset_link. Returns 0 on success or -errno.
++ */
++static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
++{
++ struct cgrp_cset_link *link;
++ int i;
++
++ INIT_LIST_HEAD(tmp_links);
++
++ for (i = 0; i < count; i++) {
++ link = kzalloc(sizeof(*link), GFP_KERNEL);
++ if (!link) {
++ free_cgrp_cset_links(tmp_links);
++ return -ENOMEM;
++ }
++ list_add(&link->cset_link, tmp_links);
++ }
++ return 0;
++}
++
++/**
++ * link_css_set - a helper function to link a css_set to a cgroup
++ * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
++ * @cset: the css_set to be linked
++ * @cgrp: the destination cgroup
++ */
++static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
++ struct cgroup *cgrp)
++{
++ struct cgrp_cset_link *link;
++
++ BUG_ON(list_empty(tmp_links));
++
++ if (cgroup_on_dfl(cgrp))
++ cset->dfl_cgrp = cgrp;
++
++ link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
++ link->cset = cset;
++ link->cgrp = cgrp;
++
++ if (list_empty(&cgrp->cset_links))
++ cgroup_update_populated(cgrp, true);
++ list_move(&link->cset_link, &cgrp->cset_links);
++
++ /*
++ * Always add links to the tail of the list so that the list
++ * is sorted by order of hierarchy creation
++ */
++ list_add_tail(&link->cgrp_link, &cset->cgrp_links);
++}
++
++/**
++ * find_css_set - return a new css_set with one cgroup updated
++ * @old_cset: the baseline css_set
++ * @cgrp: the cgroup to be updated
++ *
++ * Return a new css_set that's equivalent to @old_cset, but with @cgrp
++ * substituted into the appropriate hierarchy.
++ */
++static struct css_set *find_css_set(struct css_set *old_cset,
++ struct cgroup *cgrp)
++{
++ struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
++ struct css_set *cset;
++ struct list_head tmp_links;
++ struct cgrp_cset_link *link;
++ struct cgroup_subsys *ss;
++ unsigned long key;
++ int ssid;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ /* First see if we already have a cgroup group that matches
++ * the desired set */
++ down_read(&css_set_rwsem);
++ cset = find_existing_css_set(old_cset, cgrp, template);
++ if (cset)
++ get_css_set(cset);
++ up_read(&css_set_rwsem);
++
++ if (cset)
++ return cset;
++
++ cset = kzalloc(sizeof(*cset), GFP_KERNEL);
++ if (!cset)
++ return NULL;
++
++ /* Allocate all the cgrp_cset_link objects that we'll need */
++ if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
++ kfree(cset);
++ return NULL;
++ }
++
++ atomic_set(&cset->refcount, 1);
++ INIT_LIST_HEAD(&cset->cgrp_links);
++ INIT_LIST_HEAD(&cset->tasks);
++ INIT_LIST_HEAD(&cset->mg_tasks);
++ INIT_LIST_HEAD(&cset->mg_preload_node);
++ INIT_LIST_HEAD(&cset->mg_node);
++ INIT_HLIST_NODE(&cset->hlist);
++
++ /* Copy the set of subsystem state objects generated in
++ * find_existing_css_set() */
++ memcpy(cset->subsys, template, sizeof(cset->subsys));
++
++ down_write(&css_set_rwsem);
++ /* Add reference counts and links from the new css_set. */
++ list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
++ struct cgroup *c = link->cgrp;
++
++ if (c->root == cgrp->root)
++ c = cgrp;
++ link_css_set(&tmp_links, cset, c);
++ }
++
++ BUG_ON(!list_empty(&tmp_links));
++
++ css_set_count++;
++
++ /* Add @cset to the hash table */
++ key = css_set_hash(cset->subsys);
++ hash_add(css_set_table, &cset->hlist, key);
++
++ for_each_subsys(ss, ssid)
++ list_add_tail(&cset->e_cset_node[ssid],
++ &cset->subsys[ssid]->cgroup->e_csets[ssid]);
++
++ up_write(&css_set_rwsem);
++
++ return cset;
++}
++
++static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
++{
++ struct cgroup *root_cgrp = kf_root->kn->priv;
++
++ return root_cgrp->root;
++}
++
++static int cgroup_init_root_id(struct cgroup_root *root)
++{
++ int id;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
++ if (id < 0)
++ return id;
++
++ root->hierarchy_id = id;
++ return 0;
++}
++
++static void cgroup_exit_root_id(struct cgroup_root *root)
++{
++ lockdep_assert_held(&cgroup_mutex);
++
++ if (root->hierarchy_id) {
++ idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
++ root->hierarchy_id = 0;
++ }
++}
++
++static void cgroup_free_root(struct cgroup_root *root)
++{
++ if (root) {
++ /* hierarhcy ID shoulid already have been released */
++ WARN_ON_ONCE(root->hierarchy_id);
++
++ idr_destroy(&root->cgroup_idr);
++ kfree(root);
++ }
++}
++
++static void cgroup_destroy_root(struct cgroup_root *root)
++{
++ struct cgroup *cgrp = &root->cgrp;
++ struct cgrp_cset_link *link, *tmp_link;
++
++ mutex_lock(&cgroup_mutex);
++
++ BUG_ON(atomic_read(&root->nr_cgrps));
++ BUG_ON(!list_empty(&cgrp->self.children));
++
++ /* Rebind all subsystems back to the default hierarchy */
++ rebind_subsystems(&cgrp_dfl_root, root->subsys_mask);
++
++ /*
++ * Release all the links from cset_links to this hierarchy's
++ * root cgroup
++ */
++ down_write(&css_set_rwsem);
++
++ list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
++ list_del(&link->cset_link);
++ list_del(&link->cgrp_link);
++ kfree(link);
++ }
++ up_write(&css_set_rwsem);
++
++ if (!list_empty(&root->root_list)) {
++ list_del(&root->root_list);
++ cgroup_root_count--;
++ }
++
++ cgroup_exit_root_id(root);
++
++ mutex_unlock(&cgroup_mutex);
++
++ kernfs_destroy_root(root->kf_root);
++ cgroup_free_root(root);
++}
++
++/* look up cgroup associated with given css_set on the specified hierarchy */
++static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
++ struct cgroup_root *root)
++{
++ struct cgroup *res = NULL;
++
++ lockdep_assert_held(&cgroup_mutex);
++ lockdep_assert_held(&css_set_rwsem);
++
++ if (cset == &init_css_set) {
++ res = &root->cgrp;
++ } else {
++ struct cgrp_cset_link *link;
++
++ list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
++ struct cgroup *c = link->cgrp;
++
++ if (c->root == root) {
++ res = c;
++ break;
++ }
++ }
++ }
++
++ BUG_ON(!res);
++ return res;
++}
++
++/*
++ * Return the cgroup for "task" from the given hierarchy. Must be
++ * called with cgroup_mutex and css_set_rwsem held.
++ */
++static struct cgroup *task_cgroup_from_root(struct task_struct *task,
++ struct cgroup_root *root)
++{
++ /*
++ * No need to lock the task - since we hold cgroup_mutex the
++ * task can't change groups, so the only thing that can happen
++ * is that it exits and its css is set back to init_css_set.
++ */
++ return cset_cgroup_from_root(task_css_set(task), root);
++}
++
++/*
++ * A task must hold cgroup_mutex to modify cgroups.
++ *
++ * Any task can increment and decrement the count field without lock.
++ * So in general, code holding cgroup_mutex can't rely on the count
++ * field not changing. However, if the count goes to zero, then only
++ * cgroup_attach_task() can increment it again. Because a count of zero
++ * means that no tasks are currently attached, therefore there is no
++ * way a task attached to that cgroup can fork (the other way to
++ * increment the count). So code holding cgroup_mutex can safely
++ * assume that if the count is zero, it will stay zero. Similarly, if
++ * a task holds cgroup_mutex on a cgroup with zero count, it
++ * knows that the cgroup won't be removed, as cgroup_rmdir()
++ * needs that mutex.
++ *
++ * A cgroup can only be deleted if both its 'count' of using tasks
++ * is zero, and its list of 'children' cgroups is empty. Since all
++ * tasks in the system use _some_ cgroup, and since there is always at
++ * least one task in the system (init, pid == 1), therefore, root cgroup
++ * always has either children cgroups and/or using tasks. So we don't
++ * need a special hack to ensure that root cgroup cannot be deleted.
++ *
++ * P.S. One more locking exception. RCU is used to guard the
++ * update of a tasks cgroup pointer by cgroup_attach_task()
++ */
++
++static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask);
++static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
++static const struct file_operations proc_cgroupstats_operations;
++
++static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
++ char *buf)
++{
++ if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
++ !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
++ snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
++ cft->ss->name, cft->name);
++ else
++ strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
++ return buf;
++}
++
++/**
++ * cgroup_file_mode - deduce file mode of a control file
++ * @cft: the control file in question
++ *
++ * returns cft->mode if ->mode is not 0
++ * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
++ * returns S_IRUGO if it has only a read handler
++ * returns S_IWUSR if it has only a write hander
++ */
++static umode_t cgroup_file_mode(const struct cftype *cft)
++{
++ umode_t mode = 0;
++
++ if (cft->mode)
++ return cft->mode;
++
++ if (cft->read_u64 || cft->read_s64 || cft->seq_show)
++ mode |= S_IRUGO;
++
++ if (cft->write_u64 || cft->write_s64 || cft->write)
++ mode |= S_IWUSR;
++
++ return mode;
++}
++
++static void cgroup_get(struct cgroup *cgrp)
++{
++ WARN_ON_ONCE(cgroup_is_dead(cgrp));
++ css_get(&cgrp->self);
++}
++
++static bool cgroup_tryget(struct cgroup *cgrp)
++{
++ return css_tryget(&cgrp->self);
++}
++
++static void cgroup_put(struct cgroup *cgrp)
++{
++ css_put(&cgrp->self);
++}
++
++/**
++ * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
++ * @cgrp: the target cgroup
++ * @subtree_control: the new subtree_control mask to consider
++ *
++ * On the default hierarchy, a subsystem may request other subsystems to be
++ * enabled together through its ->depends_on mask. In such cases, more
++ * subsystems than specified in "cgroup.subtree_control" may be enabled.
++ *
++ * This function calculates which subsystems need to be enabled if
++ * @subtree_control is to be applied to @cgrp. The returned mask is always
++ * a superset of @subtree_control and follows the usual hierarchy rules.
++ */
++static unsigned int cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
++ unsigned int subtree_control)
++{
++ struct cgroup *parent = cgroup_parent(cgrp);
++ unsigned int cur_ss_mask = subtree_control;
++ struct cgroup_subsys *ss;
++ int ssid;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ if (!cgroup_on_dfl(cgrp))
++ return cur_ss_mask;
++
++ while (true) {
++ unsigned int new_ss_mask = cur_ss_mask;
++
++ for_each_subsys(ss, ssid)
++ if (cur_ss_mask & (1 << ssid))
++ new_ss_mask |= ss->depends_on;
++
++ /*
++ * Mask out subsystems which aren't available. This can
++ * happen only if some depended-upon subsystems were bound
++ * to non-default hierarchies.
++ */
++ if (parent)
++ new_ss_mask &= parent->child_subsys_mask;
++ else
++ new_ss_mask &= cgrp->root->subsys_mask;
++
++ if (new_ss_mask == cur_ss_mask)
++ break;
++ cur_ss_mask = new_ss_mask;
++ }
++
++ return cur_ss_mask;
++}
++
++/**
++ * cgroup_refresh_child_subsys_mask - update child_subsys_mask
++ * @cgrp: the target cgroup
++ *
++ * Update @cgrp->child_subsys_mask according to the current
++ * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
++ */
++static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
++{
++ cgrp->child_subsys_mask =
++ cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
++}
++
++/**
++ * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
++ * @kn: the kernfs_node being serviced
++ *
++ * This helper undoes cgroup_kn_lock_live() and should be invoked before
++ * the method finishes if locking succeeded. Note that once this function
++ * returns the cgroup returned by cgroup_kn_lock_live() may become
++ * inaccessible any time. If the caller intends to continue to access the
++ * cgroup, it should pin it before invoking this function.
++ */
++static void cgroup_kn_unlock(struct kernfs_node *kn)
++{
++ struct cgroup *cgrp;
++
++ if (kernfs_type(kn) == KERNFS_DIR)
++ cgrp = kn->priv;
++ else
++ cgrp = kn->parent->priv;
++
++ mutex_unlock(&cgroup_mutex);
++
++ kernfs_unbreak_active_protection(kn);
++ cgroup_put(cgrp);
++}
++
++/**
++ * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
++ * @kn: the kernfs_node being serviced
++ *
++ * This helper is to be used by a cgroup kernfs method currently servicing
++ * @kn. It breaks the active protection, performs cgroup locking and
++ * verifies that the associated cgroup is alive. Returns the cgroup if
++ * alive; otherwise, %NULL. A successful return should be undone by a
++ * matching cgroup_kn_unlock() invocation.
++ *
++ * Any cgroup kernfs method implementation which requires locking the
++ * associated cgroup should use this helper. It avoids nesting cgroup
++ * locking under kernfs active protection and allows all kernfs operations
++ * including self-removal.
++ */
++static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn)
++{
++ struct cgroup *cgrp;
++
++ if (kernfs_type(kn) == KERNFS_DIR)
++ cgrp = kn->priv;
++ else
++ cgrp = kn->parent->priv;
++
++ /*
++ * We're gonna grab cgroup_mutex which nests outside kernfs
++ * active_ref. cgroup liveliness check alone provides enough
++ * protection against removal. Ensure @cgrp stays accessible and
++ * break the active_ref protection.
++ */
++ if (!cgroup_tryget(cgrp))
++ return NULL;
++ kernfs_break_active_protection(kn);
++
++ mutex_lock(&cgroup_mutex);
++
++ if (!cgroup_is_dead(cgrp))
++ return cgrp;
++
++ cgroup_kn_unlock(kn);
++ return NULL;
++}
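++
++/*
++ * Usage sketch (illustrative only): a cgroup kernfs write handler built
++ * on the two helpers above typically pairs them as follows.
++ * example_write() is a made-up name; the lock/unlock pairing is the
++ * point.
++ *
++ *	static ssize_t example_write(struct kernfs_open_file *of, char *buf,
++ *				     size_t nbytes, loff_t off)
++ *	{
++ *		struct cgroup *cgrp = cgroup_kn_lock_live(of->kn);
++ *
++ *		if (!cgrp)
++ *			return -ENODEV;
++ *		... operate on @cgrp under cgroup_mutex ...
++ *		cgroup_kn_unlock(of->kn);
++ *		return nbytes;
++ *	}
++ */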
++
++static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
++{
++ char name[CGROUP_FILE_NAME_MAX];
++
++ lockdep_assert_held(&cgroup_mutex);
++ kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
++}
++
++/**
++ * cgroup_clear_dir - remove subsys files in a cgroup directory
++ * @cgrp: target cgroup
++ * @subsys_mask: mask of the subsystem ids whose files should be removed
++ */
++static void cgroup_clear_dir(struct cgroup *cgrp, unsigned int subsys_mask)
++{
++ struct cgroup_subsys *ss;
++ int i;
++
++ for_each_subsys(ss, i) {
++ struct cftype *cfts;
++
++ if (!(subsys_mask & (1 << i)))
++ continue;
++ list_for_each_entry(cfts, &ss->cfts, node)
++ cgroup_addrm_files(cgrp, cfts, false);
++ }
++}
++
++static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
++{
++ struct cgroup_subsys *ss;
++ unsigned int tmp_ss_mask;
++ int ssid, i, ret;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ for_each_subsys(ss, ssid) {
++ if (!(ss_mask & (1 << ssid)))
++ continue;
++
++ /* if @ss has non-root csses attached to it, can't move */
++ if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
++ return -EBUSY;
++
++ /* can't move between two non-dummy roots either */
++ if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
++ return -EBUSY;
++ }
++
++ /* skip creating root files on dfl_root for inhibited subsystems */
++ tmp_ss_mask = ss_mask;
++ if (dst_root == &cgrp_dfl_root)
++ tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask;
++
++ ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask);
++ if (ret) {
++ if (dst_root != &cgrp_dfl_root)
++ return ret;
++
++ /*
++ * Rebinding back to the default root is not allowed to
++ * fail. Using both default and non-default roots should
++ * be rare. Moving subsystems back and forth even more so.
++ * Just warn about it and continue.
++ */
++ if (cgrp_dfl_root_visible) {
++ pr_warn("failed to create files (%d) while rebinding 0x%x to default root\n",
++ ret, ss_mask);
++ pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
++ }
++ }
++
++ /*
++ * Nothing can fail from this point on. Remove files for the
++ * removed subsystems and rebind each subsystem.
++ */
++ for_each_subsys(ss, ssid)
++ if (ss_mask & (1 << ssid))
++ cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
++
++ for_each_subsys(ss, ssid) {
++ struct cgroup_root *src_root;
++ struct cgroup_subsys_state *css;
++ struct css_set *cset;
++
++ if (!(ss_mask & (1 << ssid)))
++ continue;
++
++ src_root = ss->root;
++ css = cgroup_css(&src_root->cgrp, ss);
++
++ WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));
++
++ RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
++ rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
++ ss->root = dst_root;
++ css->cgroup = &dst_root->cgrp;
++
++ down_write(&css_set_rwsem);
++ hash_for_each(css_set_table, i, cset, hlist)
++ list_move_tail(&cset->e_cset_node[ss->id],
++ &dst_root->cgrp.e_csets[ss->id]);
++ up_write(&css_set_rwsem);
++
++ src_root->subsys_mask &= ~(1 << ssid);
++ src_root->cgrp.subtree_control &= ~(1 << ssid);
++ cgroup_refresh_child_subsys_mask(&src_root->cgrp);
++
++ /* default hierarchy doesn't enable controllers by default */
++ dst_root->subsys_mask |= 1 << ssid;
++ if (dst_root != &cgrp_dfl_root) {
++ dst_root->cgrp.subtree_control |= 1 << ssid;
++ cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
++ }
++
++ if (ss->bind)
++ ss->bind(css);
++ }
++
++ kernfs_activate(dst_root->cgrp.kn);
++ return 0;
++}
++
++static int cgroup_show_options(struct seq_file *seq,
++ struct kernfs_root *kf_root)
++{
++ struct cgroup_root *root = cgroup_root_from_kf(kf_root);
++ struct cgroup_subsys *ss;
++ int ssid;
++
++ for_each_subsys(ss, ssid)
++ if (root->subsys_mask & (1 << ssid))
++ seq_show_option(seq, ss->name, NULL);
++ if (root->flags & CGRP_ROOT_NOPREFIX)
++ seq_puts(seq, ",noprefix");
++ if (root->flags & CGRP_ROOT_XATTR)
++ seq_puts(seq, ",xattr");
++
++ spin_lock(&release_agent_path_lock);
++ if (strlen(root->release_agent_path))
++ seq_show_option(seq, "release_agent",
++ root->release_agent_path);
++ spin_unlock(&release_agent_path_lock);
++
++ if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
++ seq_puts(seq, ",clone_children");
++ if (strlen(root->name))
++ seq_show_option(seq, "name", root->name);
++ return 0;
++}
++
++struct cgroup_sb_opts {
++ unsigned int subsys_mask;
++ unsigned int flags;
++ char *release_agent;
++ bool cpuset_clone_children;
++ char *name;
++ /* User explicitly requested empty subsystem */
++ bool none;
++};
++
++static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
++{
++ char *token, *o = data;
++ bool all_ss = false, one_ss = false;
++ unsigned int mask = -1U;
++ struct cgroup_subsys *ss;
++ int nr_opts = 0;
++ int i;
++
++#ifdef CONFIG_CPUSETS
++ mask = ~(1U << cpuset_cgrp_id);
++#endif
++
++ memset(opts, 0, sizeof(*opts));
++
++ while ((token = strsep(&o, ",")) != NULL) {
++ nr_opts++;
++
++ if (!*token)
++ return -EINVAL;
++ if (!strcmp(token, "none")) {
++ /* Explicitly have no subsystems */
++ opts->none = true;
++ continue;
++ }
++ if (!strcmp(token, "all")) {
++ /* Mutually exclusive option 'all' + subsystem name */
++ if (one_ss)
++ return -EINVAL;
++ all_ss = true;
++ continue;
++ }
++ if (!strcmp(token, "__DEVEL__sane_behavior")) {
++ opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
++ continue;
++ }
++ if (!strcmp(token, "noprefix")) {
++ opts->flags |= CGRP_ROOT_NOPREFIX;
++ continue;
++ }
++ if (!strcmp(token, "clone_children")) {
++ opts->cpuset_clone_children = true;
++ continue;
++ }
++ if (!strcmp(token, "xattr")) {
++ opts->flags |= CGRP_ROOT_XATTR;
++ continue;
++ }
++ if (!strncmp(token, "release_agent=", 14)) {
++ /* Specifying two release agents is forbidden */
++ if (opts->release_agent)
++ return -EINVAL;
++ opts->release_agent =
++ kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
++ if (!opts->release_agent)
++ return -ENOMEM;
++ continue;
++ }
++ if (!strncmp(token, "name=", 5)) {
++ const char *name = token + 5;
++ /* Can't specify an empty name */
++ if (!strlen(name))
++ return -EINVAL;
++ /* Must match [\w.-]+ */
++ for (i = 0; i < strlen(name); i++) {
++ char c = name[i];
++ if (isalnum(c))
++ continue;
++ if ((c == '.') || (c == '-') || (c == '_'))
++ continue;
++ return -EINVAL;
++ }
++ /* Specifying two names is forbidden */
++ if (opts->name)
++ return -EINVAL;
++ opts->name = kstrndup(name,
++ MAX_CGROUP_ROOT_NAMELEN - 1,
++ GFP_KERNEL);
++ if (!opts->name)
++ return -ENOMEM;
++
++ continue;
++ }
++
++ for_each_subsys(ss, i) {
++ if (strcmp(token, ss->name))
++ continue;
++ if (ss->disabled)
++ continue;
++
++ /* Mutually exclusive option 'all' + subsystem name */
++ if (all_ss)
++ return -EINVAL;
++ opts->subsys_mask |= (1 << i);
++ one_ss = true;
++
++ break;
++ }
++ if (i == CGROUP_SUBSYS_COUNT)
++ return -ENOENT;
++ }
++
++ if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
++ pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
++ if (nr_opts != 1) {
++ pr_err("sane_behavior: no other mount options allowed\n");
++ return -EINVAL;
++ }
++ return 0;
++ }
++
++ /*
++ * If the 'all' option was specified, select all the subsystems;
++ * otherwise, if none of the 'none', 'name=' or subsystem name
++ * options was specified, default to 'all'.
++ */
++ if (all_ss || (!one_ss && !opts->none && !opts->name))
++ for_each_subsys(ss, i)
++ if (!ss->disabled)
++ opts->subsys_mask |= (1 << i);
++
++ /*
++ * We either have to specify by name or by subsystems. (So all
++ * empty hierarchies must have a name).
++ */
++ if (!opts->subsys_mask && !opts->name)
++ return -EINVAL;
++
++ /*
++ * Option noprefix was introduced just for backward compatibility
++ * with the old cpuset, so we allow noprefix only if mounting just
++ * the cpuset subsystem.
++ */
++ if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
++ return -EINVAL;
++
++ /* Can't specify "none" and some subsystems */
++ if (opts->subsys_mask && opts->none)
++ return -EINVAL;
++
++ return 0;
++}
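++
++/*
++ * Example (illustrative, assuming the cpu and cpuacct controllers are
++ * compiled in and not disabled; "mygrp" and /mnt are made-up values):
++ *
++ *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt
++ *
++ * reaches this parser with data = "cpu,cpuacct,name=mygrp" and yields
++ * opts->subsys_mask with the cpu and cpuacct bits set, opts->name set
++ * to "mygrp", and opts->none == false.
++ */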
++
++static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
++{
++ int ret = 0;
++ struct cgroup_root *root = cgroup_root_from_kf(kf_root);
++ struct cgroup_sb_opts opts;
++ unsigned int added_mask, removed_mask;
++
++ if (root == &cgrp_dfl_root) {
++ pr_err("remount is not allowed\n");
++ return -EINVAL;
++ }
++
++ mutex_lock(&cgroup_mutex);
++
++ /* See what subsystems are wanted */
++ ret = parse_cgroupfs_options(data, &opts);
++ if (ret)
++ goto out_unlock;
++
++ if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
++ pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
++ task_tgid_nr(current), current->comm);
++
++ added_mask = opts.subsys_mask & ~root->subsys_mask;
++ removed_mask = root->subsys_mask & ~opts.subsys_mask;
++
++ /* Don't allow flags or name to change at remount */
++ if ((opts.flags ^ root->flags) ||
++ (opts.name && strcmp(opts.name, root->name))) {
++ pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
++ opts.flags, opts.name ?: "", root->flags, root->name);
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
++ /* remounting is not allowed for populated hierarchies */
++ if (!list_empty(&root->cgrp.self.children)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ ret = rebind_subsystems(root, added_mask);
++ if (ret)
++ goto out_unlock;
++
++ rebind_subsystems(&cgrp_dfl_root, removed_mask);
++
++ if (opts.release_agent) {
++ spin_lock(&release_agent_path_lock);
++ strcpy(root->release_agent_path, opts.release_agent);
++ spin_unlock(&release_agent_path_lock);
++ }
++ out_unlock:
++ kfree(opts.release_agent);
++ kfree(opts.name);
++ mutex_unlock(&cgroup_mutex);
++ return ret;
++}
++
++/*
++ * To reduce the fork() overhead for systems that are not actually using
++ * their cgroups capability, we don't maintain the lists running through
++ * each css_set to its tasks until we see the list actually used - in other
++ * words after the first mount.
++ */
++static bool use_task_css_set_links __read_mostly;
++
++static void cgroup_enable_task_cg_lists(void)
++{
++ struct task_struct *p, *g;
++
++ down_write(&css_set_rwsem);
++
++ if (use_task_css_set_links)
++ goto out_unlock;
++
++ use_task_css_set_links = true;
++
++ /*
++ * We need tasklist_lock because RCU is not safe against
++ * while_each_thread(). Besides, a forking task that has passed
++ * cgroup_post_fork() without seeing use_task_css_set_links = 1
++ * is not guaranteed to have its child immediately visible in the
++ * tasklist if we walk through it with RCU.
++ */
++ read_lock(&tasklist_lock);
++ do_each_thread(g, p) {
++ WARN_ON_ONCE(!list_empty(&p->cg_list) ||
++ task_css_set(p) != &init_css_set);
++
++ /*
++ * We should check if the process is exiting, otherwise
++ * it will race with cgroup_exit() in that the list
++ * entry won't be deleted though the process has exited.
++ * Do it while holding siglock so that we don't end up
++ * racing against cgroup_exit().
++ */
++ spin_lock_irq(&p->sighand->siglock);
++ if (!(p->flags & PF_EXITING)) {
++ struct css_set *cset = task_css_set(p);
++
++ list_add(&p->cg_list, &cset->tasks);
++ get_css_set(cset);
++ }
++ spin_unlock_irq(&p->sighand->siglock);
++ } while_each_thread(g, p);
++ read_unlock(&tasklist_lock);
++out_unlock:
++ up_write(&css_set_rwsem);
++}
++
++static void init_cgroup_housekeeping(struct cgroup *cgrp)
++{
++ struct cgroup_subsys *ss;
++ int ssid;
++
++ INIT_LIST_HEAD(&cgrp->self.sibling);
++ INIT_LIST_HEAD(&cgrp->self.children);
++ INIT_LIST_HEAD(&cgrp->cset_links);
++ INIT_LIST_HEAD(&cgrp->pidlists);
++ mutex_init(&cgrp->pidlist_mutex);
++ cgrp->self.cgroup = cgrp;
++ cgrp->self.flags |= CSS_ONLINE;
++
++ for_each_subsys(ss, ssid)
++ INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
++
++ init_waitqueue_head(&cgrp->offline_waitq);
++ INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
++}
++
++static void init_cgroup_root(struct cgroup_root *root,
++ struct cgroup_sb_opts *opts)
++{
++ struct cgroup *cgrp = &root->cgrp;
++
++ INIT_LIST_HEAD(&root->root_list);
++ atomic_set(&root->nr_cgrps, 1);
++ cgrp->root = root;
++ init_cgroup_housekeeping(cgrp);
++ idr_init(&root->cgroup_idr);
++
++ root->flags = opts->flags;
++ if (opts->release_agent)
++ strcpy(root->release_agent_path, opts->release_agent);
++ if (opts->name)
++ strcpy(root->name, opts->name);
++ if (opts->cpuset_clone_children)
++ set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
++}
++
++static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
++{
++ LIST_HEAD(tmp_links);
++ struct cgroup *root_cgrp = &root->cgrp;
++ struct cftype *base_files;
++ struct css_set *cset;
++ int i, ret;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
++ if (ret < 0)
++ goto out;
++ root_cgrp->id = ret;
++
++ ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
++ GFP_KERNEL);
++ if (ret)
++ goto out;
++
++ /*
++ * We're accessing css_set_count without locking css_set_rwsem here,
++ * but that's OK - it can only be increased by someone holding
++ * cgroup_lock, and that's us. The worst that can happen is that we
++ * have some link structures left over.
++ */
++ ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
++ if (ret)
++ goto cancel_ref;
++
++ ret = cgroup_init_root_id(root);
++ if (ret)
++ goto cancel_ref;
++
++ root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
++ KERNFS_ROOT_CREATE_DEACTIVATED,
++ root_cgrp);
++ if (IS_ERR(root->kf_root)) {
++ ret = PTR_ERR(root->kf_root);
++ goto exit_root_id;
++ }
++ root_cgrp->kn = root->kf_root->kn;
++
++ if (root == &cgrp_dfl_root)
++ base_files = cgroup_dfl_base_files;
++ else
++ base_files = cgroup_legacy_base_files;
++
++ ret = cgroup_addrm_files(root_cgrp, base_files, true);
++ if (ret)
++ goto destroy_root;
++
++ ret = rebind_subsystems(root, ss_mask);
++ if (ret)
++ goto destroy_root;
++
++ /*
++ * There must be no failure case after here, since rebinding takes
++ * care of subsystems' refcounts, which are explicitly dropped in
++ * the failure exit path.
++ */
++ list_add(&root->root_list, &cgroup_roots);
++ cgroup_root_count++;
++
++ /*
++ * Link the root cgroup in this hierarchy into all the css_set
++ * objects.
++ */
++ down_write(&css_set_rwsem);
++ hash_for_each(css_set_table, i, cset, hlist)
++ link_css_set(&tmp_links, cset, root_cgrp);
++ up_write(&css_set_rwsem);
++
++ BUG_ON(!list_empty(&root_cgrp->self.children));
++ BUG_ON(atomic_read(&root->nr_cgrps) != 1);
++
++ kernfs_activate(root_cgrp->kn);
++ ret = 0;
++ goto out;
++
++destroy_root:
++ kernfs_destroy_root(root->kf_root);
++ root->kf_root = NULL;
++exit_root_id:
++ cgroup_exit_root_id(root);
++cancel_ref:
++ percpu_ref_exit(&root_cgrp->self.refcnt);
++out:
++ free_cgrp_cset_links(&tmp_links);
++ return ret;
++}
++
++static struct dentry *cgroup_mount(struct file_system_type *fs_type,
++ int flags, const char *unused_dev_name,
++ void *data)
++{
++ struct super_block *pinned_sb = NULL;
++ struct cgroup_subsys *ss;
++ struct cgroup_root *root;
++ struct cgroup_sb_opts opts;
++ struct dentry *dentry;
++ int ret;
++ int i;
++ bool new_sb;
++
++ /*
++ * The first time anyone tries to mount a cgroup, enable the list
++ * linking each css_set to its tasks and fix up all existing tasks.
++ */
++ if (!use_task_css_set_links)
++ cgroup_enable_task_cg_lists();
++
++ mutex_lock(&cgroup_mutex);
++
++ /* First find the desired set of subsystems */
++ ret = parse_cgroupfs_options(data, &opts);
++ if (ret)
++ goto out_unlock;
++
++ /* look for a matching existing root */
++ if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
++ cgrp_dfl_root_visible = true;
++ root = &cgrp_dfl_root;
++ cgroup_get(&root->cgrp);
++ ret = 0;
++ goto out_unlock;
++ }
++
++ /*
++ * Destruction of cgroup root is asynchronous, so subsystems may
++ * still be dying after the previous unmount. Let's drain the
++ * dying subsystems. We just need to ensure that the ones
++ * unmounted previously finish dying and don't care about new ones
++ * starting. Testing ref liveliness is good enough.
++ */
++ for_each_subsys(ss, i) {
++ if (!(opts.subsys_mask & (1 << i)) ||
++ ss->root == &cgrp_dfl_root)
++ continue;
++
++ if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
++ mutex_unlock(&cgroup_mutex);
++ msleep(10);
++ ret = restart_syscall();
++ goto out_free;
++ }
++ cgroup_put(&ss->root->cgrp);
++ }
++
++ for_each_root(root) {
++ bool name_match = false;
++
++ if (root == &cgrp_dfl_root)
++ continue;
++
++ /*
++ * If we asked for a name then it must match. Also, if
++ * name matches but subsys_mask doesn't, we should fail.
++ * Remember whether name matched.
++ */
++ if (opts.name) {
++ if (strcmp(opts.name, root->name))
++ continue;
++ name_match = true;
++ }
++
++ /*
++ * If we asked for subsystems (or explicitly for no
++ * subsystems) then they must match.
++ */
++ if ((opts.subsys_mask || opts.none) &&
++ (opts.subsys_mask != root->subsys_mask)) {
++ if (!name_match)
++ continue;
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ if (root->flags ^ opts.flags)
++ pr_warn("new mount options do not match the existing superblock, will be ignored\n");
++
++ /*
++ * We want to reuse @root whose lifetime is governed by its
++ * ->cgrp. Let's check whether @root is alive and keep it
++ * that way. As cgroup_kill_sb() can happen anytime, we
++ * want to block it by pinning the sb so that @root doesn't
++ * get killed before mount is complete.
++ *
++ * With the sb pinned, tryget_live can reliably indicate
++ * whether @root can be reused. If it's being killed,
++ * drain it. We can use wait_queue for the wait but this
++ * path is super cold. Let's just sleep a bit and retry.
++ */
++ pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
++ if (IS_ERR(pinned_sb) ||
++ !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
++ mutex_unlock(&cgroup_mutex);
++ if (!IS_ERR_OR_NULL(pinned_sb))
++ deactivate_super(pinned_sb);
++ msleep(10);
++ ret = restart_syscall();
++ goto out_free;
++ }
++
++ ret = 0;
++ goto out_unlock;
++ }
++
++ /*
++ * No such thing, create a new one. name= matching without subsys
++ * specification is allowed for already existing hierarchies but we
++ * can't create a new one without subsys specification.
++ */
++ if (!opts.subsys_mask && !opts.none) {
++ ret = -EINVAL;
++ goto out_unlock;
++ }
++
++ root = kzalloc(sizeof(*root), GFP_KERNEL);
++ if (!root) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++
++ init_cgroup_root(root, &opts);
++
++ ret = cgroup_setup_root(root, opts.subsys_mask);
++ if (ret)
++ cgroup_free_root(root);
++
++out_unlock:
++ mutex_unlock(&cgroup_mutex);
++out_free:
++ kfree(opts.release_agent);
++ kfree(opts.name);
++
++ if (ret)
++ return ERR_PTR(ret);
++
++ dentry = kernfs_mount(fs_type, flags, root->kf_root,
++ CGROUP_SUPER_MAGIC, &new_sb);
++ if (IS_ERR(dentry) || !new_sb)
++ cgroup_put(&root->cgrp);
++
++ /*
++ * If @pinned_sb, we're reusing an existing root and holding an
++ * extra ref on its sb. Mount is complete. Put the extra ref.
++ */
++ if (pinned_sb) {
++ WARN_ON(new_sb);
++ deactivate_super(pinned_sb);
++ }
++
++ return dentry;
++}
++
++static void cgroup_kill_sb(struct super_block *sb)
++{
++ struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
++ struct cgroup_root *root = cgroup_root_from_kf(kf_root);
++
++ /*
++ * If @root doesn't have any mounts or children, start killing it.
++ * This prevents new mounts by disabling percpu_ref_tryget_live().
++ * cgroup_mount() may wait for @root's release.
++ *
++ * And don't kill the default root.
++ */
++ if (!list_empty(&root->cgrp.self.children) ||
++ root == &cgrp_dfl_root)
++ cgroup_put(&root->cgrp);
++ else
++ percpu_ref_kill(&root->cgrp.self.refcnt);
++
++ kernfs_kill_sb(sb);
++}
++
++static struct file_system_type cgroup_fs_type = {
++ .name = "cgroup",
++ .mount = cgroup_mount,
++ .kill_sb = cgroup_kill_sb,
++};
++
++/**
++ * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
++ * @task: target task
++ * @buf: the buffer to write the path into
++ * @buflen: the length of the buffer
++ *
++ * Determine @task's cgroup on the first (the one with the lowest non-zero
++ * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
++ * function grabs cgroup_mutex and shouldn't be used inside locks used by
++ * cgroup controller callbacks.
++ *
++ * Return value is the same as kernfs_path().
++ */
++char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
++{
++ struct cgroup_root *root;
++ struct cgroup *cgrp;
++ int hierarchy_id = 1;
++ char *path = NULL;
++
++ mutex_lock(&cgroup_mutex);
++ down_read(&css_set_rwsem);
++
++ root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
++
++ if (root) {
++ cgrp = task_cgroup_from_root(task, root);
++ path = cgroup_path(cgrp, buf, buflen);
++ } else {
++ /* if no hierarchy exists, everyone is in "/" */
++ if (strlcpy(buf, "/", buflen) < buflen)
++ path = buf;
++ }
++
++ up_read(&css_set_rwsem);
++ mutex_unlock(&cgroup_mutex);
++ return path;
++}
++EXPORT_SYMBOL_GPL(task_cgroup_path);
++
++/* used to track tasks and other necessary states during migration */
++struct cgroup_taskset {
++ /* the src and dst cset list running through cset->mg_node */
++ struct list_head src_csets;
++ struct list_head dst_csets;
++
++ /*
++ * Fields for cgroup_taskset_*() iteration.
++ *
++ * Before migration is committed, the target migration tasks are on
++ * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of
++ * the csets on ->dst_csets. ->csets points to either ->src_csets
++ * or ->dst_csets depending on whether migration is committed.
++ *
++ * ->cur_cset and ->cur_task point to the current task position
++ * during iteration.
++ */
++ struct list_head *csets;
++ struct css_set *cur_cset;
++ struct task_struct *cur_task;
++};
++
++/**
++ * cgroup_taskset_first - reset taskset and return the first task
++ * @tset: taskset of interest
++ *
++ * @tset iteration is initialized and the first task is returned.
++ */
++struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
++{
++ tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
++ tset->cur_task = NULL;
++
++ return cgroup_taskset_next(tset);
++}
++
++/**
++ * cgroup_taskset_next - iterate to the next task in taskset
++ * @tset: taskset of interest
++ *
++ * Return the next task in @tset. Iteration must have been initialized
++ * with cgroup_taskset_first().
++ */
++struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
++{
++ struct css_set *cset = tset->cur_cset;
++ struct task_struct *task = tset->cur_task;
++
++ while (&cset->mg_node != tset->csets) {
++ if (!task)
++ task = list_first_entry(&cset->mg_tasks,
++ struct task_struct, cg_list);
++ else
++ task = list_next_entry(task, cg_list);
++
++ if (&task->cg_list != &cset->mg_tasks) {
++ tset->cur_cset = cset;
++ tset->cur_task = task;
++ return task;
++ }
++
++ cset = list_next_entry(cset, mg_node);
++ task = NULL;
++ }
++
++ return NULL;
++}
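++
++/*
++ * Usage sketch (illustrative only): a controller's ->can_attach() walks
++ * the taskset with the two iterators above, usually through the
++ * cgroup_taskset_for_each() wrapper. example_can_attach() and
++ * example_task_allowed() are made-up names:
++ *
++ *	static int example_can_attach(struct cgroup_subsys_state *css,
++ *				      struct cgroup_taskset *tset)
++ *	{
++ *		struct task_struct *task;
++ *
++ *		cgroup_taskset_for_each(task, tset)
++ *			if (!example_task_allowed(task))
++ *				return -EINVAL;
++ *		return 0;
++ *	}
++ */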
++
++/**
++ * cgroup_task_migrate - move a task from one cgroup to another.
++ * @old_cgrp: the cgroup @tsk is being migrated from
++ * @tsk: the task being migrated
++ * @new_cset: the new css_set @tsk is being attached to
++ *
++ * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
++ */
++static void cgroup_task_migrate(struct cgroup *old_cgrp,
++ struct task_struct *tsk,
++ struct css_set *new_cset)
++{
++ struct css_set *old_cset;
++
++ lockdep_assert_held(&cgroup_mutex);
++ lockdep_assert_held(&css_set_rwsem);
++
++ /*
++ * We are synchronized through threadgroup_lock() against PF_EXITING
++ * setting such that we can't race against cgroup_exit() changing the
++ * css_set to init_css_set and dropping the old one.
++ */
++ WARN_ON_ONCE(tsk->flags & PF_EXITING);
++ old_cset = task_css_set(tsk);
++
++ get_css_set(new_cset);
++ rcu_assign_pointer(tsk->cgroups, new_cset);
++
++ /*
++ * Use move_tail so that cgroup_taskset_first() still returns the
++ * leader after migration. This works because cgroup_migrate()
++ * ensures that the dst_cset of the leader is the first on the
++ * tset's dst_csets list.
++ */
++ list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
++
++ /*
++ * We just gained a reference on old_cset by taking it from the
++ * task. As trading it for new_cset is protected by cgroup_mutex,
++ * we're safe to drop it here; it will be freed under RCU.
++ */
++ put_css_set_locked(old_cset);
++}
++
++/**
++ * cgroup_migrate_finish - cleanup after attach
++ * @preloaded_csets: list of preloaded css_sets
++ *
++ * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See
++ * those functions for details.
++ */
++static void cgroup_migrate_finish(struct list_head *preloaded_csets)
++{
++ struct css_set *cset, *tmp_cset;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ down_write(&css_set_rwsem);
++ list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
++ cset->mg_src_cgrp = NULL;
++ cset->mg_dst_cset = NULL;
++ list_del_init(&cset->mg_preload_node);
++ put_css_set_locked(cset);
++ }
++ up_write(&css_set_rwsem);
++}
++
++/**
++ * cgroup_migrate_add_src - add a migration source css_set
++ * @src_cset: the source css_set to add
++ * @dst_cgrp: the destination cgroup
++ * @preloaded_csets: list of preloaded css_sets
++ *
++ * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin
++ * @src_cset and add it to @preloaded_csets, which should later be cleaned
++ * up by cgroup_migrate_finish().
++ *
++ * This function may be called without holding threadgroup_lock even if the
++ * target is a process. Threads may be created and destroyed but as long
++ * as cgroup_mutex is not dropped, no new css_set can be put into play and
++ * the preloaded css_sets are guaranteed to cover all migrations.
++ */
++static void cgroup_migrate_add_src(struct css_set *src_cset,
++ struct cgroup *dst_cgrp,
++ struct list_head *preloaded_csets)
++{
++ struct cgroup *src_cgrp;
++
++ lockdep_assert_held(&cgroup_mutex);
++ lockdep_assert_held(&css_set_rwsem);
++
++ src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
++
++ if (!list_empty(&src_cset->mg_preload_node))
++ return;
++
++ WARN_ON(src_cset->mg_src_cgrp);
++ WARN_ON(!list_empty(&src_cset->mg_tasks));
++ WARN_ON(!list_empty(&src_cset->mg_node));
++
++ src_cset->mg_src_cgrp = src_cgrp;
++ get_css_set(src_cset);
++ list_add(&src_cset->mg_preload_node, preloaded_csets);
++}
++
++/**
++ * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
++ * @dst_cgrp: the destination cgroup (may be %NULL)
++ * @preloaded_csets: list of preloaded source css_sets
++ *
++ * Tasks are about to be moved to @dst_cgrp and all the source css_sets
++ * have been preloaded to @preloaded_csets. This function looks up and
++ * pins all destination css_sets, links each to its source, and appends them
++ * to @preloaded_csets. If @dst_cgrp is %NULL, the destination of each
++ * source css_set is assumed to be its cgroup on the default hierarchy.
++ *
++ * This function must be called after cgroup_migrate_add_src() has been
++ * called on each migration source css_set. After migration is performed
++ * using cgroup_migrate(), cgroup_migrate_finish() must be called on
++ * @preloaded_csets.
++ */
++static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp,
++ struct list_head *preloaded_csets)
++{
++ LIST_HEAD(csets);
++ struct css_set *src_cset, *tmp_cset;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ /*
++ * Except for the root, child_subsys_mask must be zero for a cgroup
++ * with tasks so that child cgroups don't compete against tasks.
++ */
++ if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) &&
++ dst_cgrp->child_subsys_mask)
++ return -EBUSY;
++
++ /* look up the dst cset for each src cset and link it to src */
++ list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
++ struct css_set *dst_cset;
++
++ dst_cset = find_css_set(src_cset,
++ dst_cgrp ?: src_cset->dfl_cgrp);
++ if (!dst_cset)
++ goto err;
++
++ WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
++
++ /*
++ * If src cset equals dst, it's a noop. Drop the src.
++ * cgroup_migrate() will skip the cset too. Note that we
++ * can't handle src == dst as some nodes are used by both.
++ */
++ if (src_cset == dst_cset) {
++ src_cset->mg_src_cgrp = NULL;
++ list_del_init(&src_cset->mg_preload_node);
++ put_css_set(src_cset);
++ put_css_set(dst_cset);
++ continue;
++ }
++
++ src_cset->mg_dst_cset = dst_cset;
++
++ if (list_empty(&dst_cset->mg_preload_node))
++ list_add(&dst_cset->mg_preload_node, &csets);
++ else
++ put_css_set(dst_cset);
++ }
++
++ list_splice_tail(&csets, preloaded_csets);
++ return 0;
++err:
++ cgroup_migrate_finish(&csets);
++ return -ENOMEM;
++}
++
++/**
++ * cgroup_migrate - migrate a process or task to a cgroup
++ * @cgrp: the destination cgroup
++ * @leader: the leader of the process or the task to migrate
++ * @threadgroup: whether @leader points to the whole process or a single task
++ *
++ * Migrate a process or task denoted by @leader to @cgrp. If migrating a
++ * process, the caller must be holding threadgroup_lock of @leader. The
++ * caller is also responsible for invoking cgroup_migrate_add_src() and
++ * cgroup_migrate_prepare_dst() on the targets before invoking this
++ * function and following up with cgroup_migrate_finish().
++ *
++ * As long as a controller's ->can_attach() doesn't fail, this function is
++ * guaranteed to succeed. This means that, excluding ->can_attach()
++ * failure, when migrating multiple targets, the success or failure can be
++ * decided for all targets by invoking cgroup_migrate_prepare_dst() before
++ * actually starting to migrate.
++ */
++static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
++ bool threadgroup)
++{
++ struct cgroup_taskset tset = {
++ .src_csets = LIST_HEAD_INIT(tset.src_csets),
++ .dst_csets = LIST_HEAD_INIT(tset.dst_csets),
++ .csets = &tset.src_csets,
++ };
++ struct cgroup_subsys_state *css, *failed_css = NULL;
++ struct css_set *cset, *tmp_cset;
++ struct task_struct *task, *tmp_task;
++ int i, ret;
++
++ /*
++ * Prevent freeing of tasks while we take a snapshot. Tasks that are
++ * already PF_EXITING could be freed from underneath us unless we
++ * take an rcu_read_lock.
++ */
++ down_write(&css_set_rwsem);
++ rcu_read_lock();
++ task = leader;
++ do {
++ /* @task either already exited or can't exit until the end */
++ if (task->flags & PF_EXITING)
++ goto next;
++
++ /* leave @task alone if post_fork() hasn't linked it yet */
++ if (list_empty(&task->cg_list))
++ goto next;
++
++ cset = task_css_set(task);
++ if (!cset->mg_src_cgrp)
++ goto next;
++
++ /*
++ * cgroup_taskset_first() must always return the leader.
++ * Take care to avoid disturbing the ordering.
++ */
++ list_move_tail(&task->cg_list, &cset->mg_tasks);
++ if (list_empty(&cset->mg_node))
++ list_add_tail(&cset->mg_node, &tset.src_csets);
++ if (list_empty(&cset->mg_dst_cset->mg_node))
++ list_move_tail(&cset->mg_dst_cset->mg_node,
++ &tset.dst_csets);
++ next:
++ if (!threadgroup)
++ break;
++ } while_each_thread(leader, task);
++ rcu_read_unlock();
++ up_write(&css_set_rwsem);
++
++ /* methods shouldn't be called if no task is actually migrating */
++ if (list_empty(&tset.src_csets))
++ return 0;
++
++ /* check that we can legitimately attach to the cgroup */
++ for_each_e_css(css, i, cgrp) {
++ if (css->ss->can_attach) {
++ ret = css->ss->can_attach(css, &tset);
++ if (ret) {
++ failed_css = css;
++ goto out_cancel_attach;
++ }
++ }
++ }
++
++ /*
++ * Now that we're guaranteed success, proceed to move all tasks to
++ * the new cgroup. There are no failure cases after here, so this
++ * is the commit point.
++ */
++ down_write(&css_set_rwsem);
++ list_for_each_entry(cset, &tset.src_csets, mg_node) {
++ list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list)
++ cgroup_task_migrate(cset->mg_src_cgrp, task,
++ cset->mg_dst_cset);
++ }
++ up_write(&css_set_rwsem);
++
++ /*
++ * Migration is committed, all target tasks are now on dst_csets.
++ * Nothing is sensitive to fork() after this point. Notify
++ * controllers that migration is complete.
++ */
++ tset.csets = &tset.dst_csets;
++
++ for_each_e_css(css, i, cgrp)
++ if (css->ss->attach)
++ css->ss->attach(css, &tset);
++
++ ret = 0;
++ goto out_release_tset;
++
++out_cancel_attach:
++ for_each_e_css(css, i, cgrp) {
++ if (css == failed_css)
++ break;
++ if (css->ss->cancel_attach)
++ css->ss->cancel_attach(css, &tset);
++ }
++out_release_tset:
++ down_write(&css_set_rwsem);
++ list_splice_init(&tset.dst_csets, &tset.src_csets);
++ list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) {
++ list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
++ list_del_init(&cset->mg_node);
++ }
++ up_write(&css_set_rwsem);
++ return ret;
++}
++
++/**
++ * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
++ * @dst_cgrp: the cgroup to attach to
++ * @leader: the task or the leader of the threadgroup to be attached
++ * @threadgroup: attach the whole threadgroup?
++ *
++ * Call holding cgroup_mutex and threadgroup_lock of @leader.
++ */
++static int cgroup_attach_task(struct cgroup *dst_cgrp,
++ struct task_struct *leader, bool threadgroup)
++{
++ LIST_HEAD(preloaded_csets);
++ struct task_struct *task;
++ int ret;
++
++ /* look up all src csets */
++ down_read(&css_set_rwsem);
++ rcu_read_lock();
++ task = leader;
++ do {
++ cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
++ &preloaded_csets);
++ if (!threadgroup)
++ break;
++ } while_each_thread(leader, task);
++ rcu_read_unlock();
++ up_read(&css_set_rwsem);
++
++ /* prepare dst csets and commit */
++ ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
++ if (!ret)
++ ret = cgroup_migrate(dst_cgrp, leader, threadgroup);
++
++ cgroup_migrate_finish(&preloaded_csets);
++ return ret;
++}
++
++/*
++ * Find the task_struct of the task to attach by vpid and pass it along to the
++ * function to attach either it or all tasks in its threadgroup. Will lock
++ * cgroup_mutex and threadgroup.
++ */
++static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
++ size_t nbytes, loff_t off, bool threadgroup)
++{
++ struct task_struct *tsk;
++ const struct cred *cred = current_cred(), *tcred;
++ struct cgroup *cgrp;
++ pid_t pid;
++ int ret;
++
++ if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
++ return -EINVAL;
++
++ cgrp = cgroup_kn_lock_live(of->kn);
++ if (!cgrp)
++ return -ENODEV;
++
++retry_find_task:
++ rcu_read_lock();
++ if (pid) {
++ tsk = find_task_by_vpid(pid);
++ if (!tsk) {
++ rcu_read_unlock();
++ ret = -ESRCH;
++ goto out_unlock_cgroup;
++ }
++ /*
++ * even if we're attaching all tasks in the thread group, we
++ * only need to check permissions on one of them.
++ */
++ tcred = __task_cred(tsk);
++ if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
++ !uid_eq(cred->euid, tcred->uid) &&
++ !uid_eq(cred->euid, tcred->suid)) {
++ rcu_read_unlock();
++ ret = -EACCES;
++ goto out_unlock_cgroup;
++ }
++ } else
++ tsk = current;
++
++ if (threadgroup)
++ tsk = tsk->group_leader;
++
++ /*
++ * Workqueue threads may acquire PF_NO_SETAFFINITY and become
++ * trapped in a cpuset, or an RT worker may be born in a cgroup
++ * with no rt_runtime allocated. Just say no.
++ */
++ if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
++ ret = -EINVAL;
++ rcu_read_unlock();
++ goto out_unlock_cgroup;
++ }
++
++ get_task_struct(tsk);
++ rcu_read_unlock();
++
++ threadgroup_lock(tsk);
++ if (threadgroup) {
++ if (!thread_group_leader(tsk)) {
++ /*
++ * a race with de_thread from another thread's exec()
++ * may strip us of our leadership; if this happens,
++ * there is no choice but to throw this task away and
++ * try again; this is
++ * "double-double-toil-and-trouble-check locking".
++ */
++ threadgroup_unlock(tsk);
++ put_task_struct(tsk);
++ goto retry_find_task;
++ }
++ }
++
++ ret = cgroup_attach_task(cgrp, tsk, threadgroup);
++
++ threadgroup_unlock(tsk);
++
++ put_task_struct(tsk);
++out_unlock_cgroup:
++ cgroup_kn_unlock(of->kn);
++ return ret ?: nbytes;
++}
++
++/**
++ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
++ * @from: attach to all cgroups of a given task
++ * @tsk: the task to be attached
++ */
++int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
++{
++ struct cgroup_root *root;
++ int retval = 0;
++
++ mutex_lock(&cgroup_mutex);
++ for_each_root(root) {
++ struct cgroup *from_cgrp;
++
++ if (root == &cgrp_dfl_root)
++ continue;
++
++ down_read(&css_set_rwsem);
++ from_cgrp = task_cgroup_from_root(from, root);
++ up_read(&css_set_rwsem);
++
++ retval = cgroup_attach_task(from_cgrp, tsk, false);
++ if (retval)
++ break;
++ }
++ mutex_unlock(&cgroup_mutex);
++
++ return retval;
++}
++EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
++
++static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
++ char *buf, size_t nbytes, loff_t off)
++{
++ return __cgroup_procs_write(of, buf, nbytes, off, false);
++}
++
++static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
++ char *buf, size_t nbytes, loff_t off)
++{
++ return __cgroup_procs_write(of, buf, nbytes, off, true);
++}
++
++static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
++ char *buf, size_t nbytes, loff_t off)
++{
++ struct cgroup *cgrp;
++
++ BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
++
++ cgrp = cgroup_kn_lock_live(of->kn);
++ if (!cgrp)
++ return -ENODEV;
++ spin_lock(&release_agent_path_lock);
++ strlcpy(cgrp->root->release_agent_path, strstrip(buf),
++ sizeof(cgrp->root->release_agent_path));
++ spin_unlock(&release_agent_path_lock);
++ cgroup_kn_unlock(of->kn);
++ return nbytes;
++}
++
++static int cgroup_release_agent_show(struct seq_file *seq, void *v)
++{
++ struct cgroup *cgrp = seq_css(seq)->cgroup;
++
++ spin_lock(&release_agent_path_lock);
++ seq_puts(seq, cgrp->root->release_agent_path);
++ spin_unlock(&release_agent_path_lock);
++ seq_putc(seq, '\n');
++ return 0;
++}
++
++static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
++{
++ seq_puts(seq, "0\n");
++ return 0;
++}
++
++static void cgroup_print_ss_mask(struct seq_file *seq, unsigned int ss_mask)
++{
++ struct cgroup_subsys *ss;
++ bool printed = false;
++ int ssid;
++
++ for_each_subsys(ss, ssid) {
++ if (ss_mask & (1 << ssid)) {
++ if (printed)
++ seq_putc(seq, ' ');
++ seq_printf(seq, "%s", ss->name);
++ printed = true;
++ }
++ }
++ if (printed)
++ seq_putc(seq, '\n');
++}
++
++/* show controllers which are currently attached to the default hierarchy */
++static int cgroup_root_controllers_show(struct seq_file *seq, void *v)
++{
++ struct cgroup *cgrp = seq_css(seq)->cgroup;
++
++ cgroup_print_ss_mask(seq, cgrp->root->subsys_mask &
++ ~cgrp_dfl_root_inhibit_ss_mask);
++ return 0;
++}
++
++/* show controllers which are enabled from the parent */
++static int cgroup_controllers_show(struct seq_file *seq, void *v)
++{
++ struct cgroup *cgrp = seq_css(seq)->cgroup;
++
++ cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
++ return 0;
++}
++
++/* show controllers which are enabled for a given cgroup's children */
++static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
++{
++ struct cgroup *cgrp = seq_css(seq)->cgroup;
++
++ cgroup_print_ss_mask(seq, cgrp->subtree_control);
++ return 0;
++}
++
++/**
++ * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
++ * @cgrp: root of the subtree to update csses for
++ *
++ * @cgrp's child_subsys_mask has changed and its subtree's (self excluded)
++ * css associations need to be updated accordingly. This function looks up
++ * all css_sets which are attached to the subtree, creates the matching
++ * updated css_sets and migrates the tasks to the new ones.
++ */
++static int cgroup_update_dfl_csses(struct cgroup *cgrp)
++{
++ LIST_HEAD(preloaded_csets);
++ struct cgroup_subsys_state *css;
++ struct css_set *src_cset;
++ int ret;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ /* look up all csses currently attached to @cgrp's subtree */
++ down_read(&css_set_rwsem);
++ css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
++ struct cgrp_cset_link *link;
++
++ /* self is not affected by child_subsys_mask change */
++ if (css->cgroup == cgrp)
++ continue;
++
++ list_for_each_entry(link, &css->cgroup->cset_links, cset_link)
++ cgroup_migrate_add_src(link->cset, cgrp,
++ &preloaded_csets);
++ }
++ up_read(&css_set_rwsem);
++
++ /* NULL dst indicates self on default hierarchy */
++ ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
++ if (ret)
++ goto out_finish;
++
++ list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
++ struct task_struct *last_task = NULL, *task;
++
++ /* src_csets precede dst_csets, break on the first dst_cset */
++ if (!src_cset->mg_src_cgrp)
++ break;
++
++ /*
++ * All tasks in src_cset need to be migrated to the
++ * matching dst_cset. Empty it process by process. We
++ * walk tasks but migrate processes. The leader might even
++ * belong to a different cset but such src_cset would also
++ * be among the target src_csets because the default
++ * hierarchy enforces per-process membership.
++ */
++ while (true) {
++ down_read(&css_set_rwsem);
++ task = list_first_entry_or_null(&src_cset->tasks,
++ struct task_struct, cg_list);
++ if (task) {
++ task = task->group_leader;
++ WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp);
++ get_task_struct(task);
++ }
++ up_read(&css_set_rwsem);
++
++ if (!task)
++ break;
++
++ /* guard against possible infinite loop */
++ if (WARN(last_task == task,
++ "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n"))
++ goto out_finish;
++ last_task = task;
++
++ threadgroup_lock(task);
++ /* raced against de_thread() from another thread? */
++ if (!thread_group_leader(task)) {
++ threadgroup_unlock(task);
++ put_task_struct(task);
++ continue;
++ }
++
++ ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
++
++ threadgroup_unlock(task);
++ put_task_struct(task);
++
++ if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
++ goto out_finish;
++ }
++ }
++
++out_finish:
++ cgroup_migrate_finish(&preloaded_csets);
++ return ret;
++}
++
++/* change the enabled child controllers for a cgroup in the default hierarchy */
++static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
++ char *buf, size_t nbytes,
++ loff_t off)
++{
++ unsigned int enable = 0, disable = 0;
++ unsigned int css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
++ struct cgroup *cgrp, *child;
++ struct cgroup_subsys *ss;
++ char *tok;
++ int ssid, ret;
++
++ /*
++ * Parse input - space separated list of subsystem names prefixed
++ * with either + or -.
++ */
++ buf = strstrip(buf);
++ while ((tok = strsep(&buf, " "))) {
++ if (tok[0] == '\0')
++ continue;
++ for_each_subsys(ss, ssid) {
++ if (ss->disabled || strcmp(tok + 1, ss->name) ||
++ ((1 << ss->id) & cgrp_dfl_root_inhibit_ss_mask))
++ continue;
++
++ if (*tok == '+') {
++ enable |= 1 << ssid;
++ disable &= ~(1 << ssid);
++ } else if (*tok == '-') {
++ disable |= 1 << ssid;
++ enable &= ~(1 << ssid);
++ } else {
++ return -EINVAL;
++ }
++ break;
++ }
++ if (ssid == CGROUP_SUBSYS_COUNT)
++ return -EINVAL;
++ }
++
++ cgrp = cgroup_kn_lock_live(of->kn);
++ if (!cgrp)
++ return -ENODEV;
++
++ for_each_subsys(ss, ssid) {
++ if (enable & (1 << ssid)) {
++ if (cgrp->subtree_control & (1 << ssid)) {
++ enable &= ~(1 << ssid);
++ continue;
++ }
++
++ /* unavailable or not enabled on the parent? */
++ if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
++ (cgroup_parent(cgrp) &&
++ !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
++ ret = -ENOENT;
++ goto out_unlock;
++ }
++ } else if (disable & (1 << ssid)) {
++ if (!(cgrp->subtree_control & (1 << ssid))) {
++ disable &= ~(1 << ssid);
++ continue;
++ }
++
++ /* a child has it enabled? */
++ cgroup_for_each_live_child(child, cgrp) {
++ if (child->subtree_control & (1 << ssid)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++ }
++ }
++ }
++
++ if (!enable && !disable) {
++ ret = 0;
++ goto out_unlock;
++ }
++
++ /*
++ * Except for the root, subtree_control must be zero for a cgroup
++ * with tasks so that child cgroups don't compete against tasks.
++ */
++ if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
++ ret = -EBUSY;
++ goto out_unlock;
++ }
++
++ /*
++ * Update subsys masks and calculate what needs to be done. More
++ * subsystems than specified may need to be enabled or disabled
++ * depending on subsystem dependencies.
++ */
++ old_sc = cgrp->subtree_control;
++ old_ss = cgrp->child_subsys_mask;
++ new_sc = (old_sc | enable) & ~disable;
++ new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
++
++ css_enable = ~old_ss & new_ss;
++ css_disable = old_ss & ~new_ss;
++ enable |= css_enable;
++ disable |= css_disable;
++
++ /*
++ * Because css offlining is asynchronous, userland might try to
++ * re-enable the same controller while the previous instance is
++ * still around. In such cases, wait till it's gone using
++ * offline_waitq.
++ */
++ for_each_subsys(ss, ssid) {
++ if (!(css_enable & (1 << ssid)))
++ continue;
++
++ cgroup_for_each_live_child(child, cgrp) {
++ DEFINE_WAIT(wait);
++
++ if (!cgroup_css(child, ss))
++ continue;
++
++ cgroup_get(child);
++ prepare_to_wait(&child->offline_waitq, &wait,
++ TASK_UNINTERRUPTIBLE);
++ cgroup_kn_unlock(of->kn);
++ schedule();
++ finish_wait(&child->offline_waitq, &wait);
++ cgroup_put(child);
++
++ return restart_syscall();
++ }
++ }
++
++ cgrp->subtree_control = new_sc;
++ cgrp->child_subsys_mask = new_ss;
++
++ /*
++ * Create new csses or make the existing ones visible. A css is
++ * created invisible if it's being implicitly enabled through
++ * dependency. An invisible css is made visible when the userland
++ * explicitly enables it.
++ */
++ for_each_subsys(ss, ssid) {
++ if (!(enable & (1 << ssid)))
++ continue;
++
++ cgroup_for_each_live_child(child, cgrp) {
++ if (css_enable & (1 << ssid))
++ ret = create_css(child, ss,
++ cgrp->subtree_control & (1 << ssid));
++ else
++ ret = cgroup_populate_dir(child, 1 << ssid);
++ if (ret)
++ goto err_undo_css;
++ }
++ }
++
++ /*
++ * At this point, cgroup_e_css() results reflect the new csses
++ * making the following cgroup_update_dfl_csses() properly update
++ * css associations of all tasks in the subtree.
++ */
++ ret = cgroup_update_dfl_csses(cgrp);
++ if (ret)
++ goto err_undo_css;
++
++ /*
++ * All tasks are migrated out of disabled csses. Kill or hide
++ * them. A css is hidden when the userland requests it to be
++ * disabled while other subsystems are still depending on it. The
++ * css must not actively control resources and be in the vanilla
++ * state if it's made visible again later. Controllers which may
++ * be depended upon should provide ->css_reset() for this purpose.
++ */
++ for_each_subsys(ss, ssid) {
++ if (!(disable & (1 << ssid)))
++ continue;
++
++ cgroup_for_each_live_child(child, cgrp) {
++ struct cgroup_subsys_state *css = cgroup_css(child, ss);
++
++ if (css_disable & (1 << ssid)) {
++ kill_css(css);
++ } else {
++ cgroup_clear_dir(child, 1 << ssid);
++ if (ss->css_reset)
++ ss->css_reset(css);
++ }
++ }
++ }
++
++ /*
++ * The effective csses of all the descendants (excluding @cgrp) may
++ * have changed. Subsystems can optionally subscribe to this event
++ * by implementing ->css_e_css_changed() which is invoked if any of
++ * the effective csses seen from the css's cgroup may have changed.
++ */
++ for_each_subsys(ss, ssid) {
++ struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
++ struct cgroup_subsys_state *css;
++
++ if (!ss->css_e_css_changed || !this_css)
++ continue;
++
++ css_for_each_descendant_pre(css, this_css)
++ if (css != this_css)
++ ss->css_e_css_changed(css);
++ }
++
++ kernfs_activate(cgrp->kn);
++ ret = 0;
++out_unlock:
++ cgroup_kn_unlock(of->kn);
++ return ret ?: nbytes;
++
++err_undo_css:
++ cgrp->subtree_control = old_sc;
++ cgrp->child_subsys_mask = old_ss;
++
++ for_each_subsys(ss, ssid) {
++ if (!(enable & (1 << ssid)))
++ continue;
++
++ cgroup_for_each_live_child(child, cgrp) {
++ struct cgroup_subsys_state *css = cgroup_css(child, ss);
++
++ if (!css)
++ continue;
++
++ if (css_enable & (1 << ssid))
++ kill_css(css);
++ else
++ cgroup_clear_dir(child, 1 << ssid);
++ }
++ }
++ goto out_unlock;
++}
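++
++/*
++ * Example (illustrative): on the default hierarchy, userland enables and
++ * disables child controllers by writing space separated, +/- prefixed
++ * controller names, e.g.
++ *
++ *	echo "+memory -blkio" > <mountpoint>/mygrp/cgroup.subtree_control
++ *
++ * which reaches the handler above with buf = "+memory -blkio". The
++ * controller names and "mygrp" are made-up examples; which controllers
++ * are actually accepted depends on the kernel configuration and on what
++ * is enabled in cgrp_dfl_root.subsys_mask and the parent's
++ * subtree_control.
++ */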
++
++static int cgroup_populated_show(struct seq_file *seq, void *v)
++{
++ seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt);
++ return 0;
++}
++
++static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
++ size_t nbytes, loff_t off)
++{
++ struct cgroup *cgrp = of->kn->parent->priv;
++ struct cftype *cft = of->kn->priv;
++ struct cgroup_subsys_state *css;
++ int ret;
++
++ if (cft->write)
++ return cft->write(of, buf, nbytes, off);
++
++ /*
++ * kernfs guarantees that a file isn't deleted with operations in
++ * flight, which means that the matching css is and stays alive and
++ * doesn't need to be pinned. The RCU locking is not necessary
++ * either. It's just for the convenience of using cgroup_css().
++ */
++ rcu_read_lock();
++ css = cgroup_css(cgrp, cft->ss);
++ rcu_read_unlock();
++
++ if (cft->write_u64) {
++ unsigned long long v;
++ ret = kstrtoull(buf, 0, &v);
++ if (!ret)
++ ret = cft->write_u64(css, cft, v);
++ } else if (cft->write_s64) {
++ long long v;
++ ret = kstrtoll(buf, 0, &v);
++ if (!ret)
++ ret = cft->write_s64(css, cft, v);
++ } else {
++ ret = -EINVAL;
++ }
++
++ return ret ?: nbytes;
++}
++
++static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
++{
++ return seq_cft(seq)->seq_start(seq, ppos);
++}
++
++static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
++{
++ return seq_cft(seq)->seq_next(seq, v, ppos);
++}
++
++static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
++{
++ seq_cft(seq)->seq_stop(seq, v);
++}
++
++static int cgroup_seqfile_show(struct seq_file *m, void *arg)
++{
++ struct cftype *cft = seq_cft(m);
++ struct cgroup_subsys_state *css = seq_css(m);
++
++ if (cft->seq_show)
++ return cft->seq_show(m, arg);
++
++ if (cft->read_u64)
++ seq_printf(m, "%llu\n", cft->read_u64(css, cft));
++ else if (cft->read_s64)
++ seq_printf(m, "%lld\n", cft->read_s64(css, cft));
++ else
++ return -EINVAL;
++ return 0;
++}
++
++static struct kernfs_ops cgroup_kf_single_ops = {
++ .atomic_write_len = PAGE_SIZE,
++ .write = cgroup_file_write,
++ .seq_show = cgroup_seqfile_show,
++};
++
++static struct kernfs_ops cgroup_kf_ops = {
++ .atomic_write_len = PAGE_SIZE,
++ .write = cgroup_file_write,
++ .seq_start = cgroup_seqfile_start,
++ .seq_next = cgroup_seqfile_next,
++ .seq_stop = cgroup_seqfile_stop,
++ .seq_show = cgroup_seqfile_show,
++};
++
++/*
++ * cgroup_rename - Only allow simple rename of directories in place.
++ */
++static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
++ const char *new_name_str)
++{
++ struct cgroup *cgrp = kn->priv;
++ int ret;
++
++ if (kernfs_type(kn) != KERNFS_DIR)
++ return -ENOTDIR;
++ if (kn->parent != new_parent)
++ return -EIO;
++
++ /*
++ * This isn't a proper migration and its usefulness is very
++ * limited. Disallow on the default hierarchy.
++ */
++ if (cgroup_on_dfl(cgrp))
++ return -EPERM;
++
++ /*
++ * We're gonna grab cgroup_mutex which nests outside kernfs
++ * active_ref. kernfs_rename() doesn't require active_ref
++ * protection. Break them before grabbing cgroup_mutex.
++ */
++ kernfs_break_active_protection(new_parent);
++ kernfs_break_active_protection(kn);
++
++ mutex_lock(&cgroup_mutex);
++
++ ret = kernfs_rename(kn, new_parent, new_name_str);
++
++ mutex_unlock(&cgroup_mutex);
++
++ kernfs_unbreak_active_protection(kn);
++ kernfs_unbreak_active_protection(new_parent);
++ return ret;
++}
++
++/* set uid and gid of cgroup dirs and files to that of the creator */
++static int cgroup_kn_set_ugid(struct kernfs_node *kn)
++{
++ struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
++ .ia_uid = current_fsuid(),
++ .ia_gid = current_fsgid(), };
++
++ if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
++ gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
++ return 0;
++
++ return kernfs_setattr(kn, &iattr);
++}
++
++static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
++{
++ char name[CGROUP_FILE_NAME_MAX];
++ struct kernfs_node *kn;
++ struct lock_class_key *key = NULL;
++ int ret;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ key = &cft->lockdep_key;
++#endif
++ kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
++ cgroup_file_mode(cft), 0, cft->kf_ops, cft,
++ NULL, key);
++ if (IS_ERR(kn))
++ return PTR_ERR(kn);
++
++ ret = cgroup_kn_set_ugid(kn);
++ if (ret) {
++ kernfs_remove(kn);
++ return ret;
++ }
++
++ if (cft->seq_show == cgroup_populated_show)
++ cgrp->populated_kn = kn;
++ return 0;
++}
++
++/**
++ * cgroup_addrm_files - add or remove files to a cgroup directory
++ * @cgrp: the target cgroup
++ * @cfts: array of cftypes to be added
++ * @is_add: whether to add or remove
++ *
++ * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
++ * For removals, this function never fails. If addition fails, this
++ * function doesn't remove files already added. The caller is responsible
++ * for cleaning up.
++ */
++static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
++ bool is_add)
++{
++ struct cftype *cft;
++ int ret;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ for (cft = cfts; cft->name[0] != '\0'; cft++) {
++ /* does cft->flags tell us to skip this file on @cgrp? */
++ if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
++ continue;
++ if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
++ continue;
++ if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
++ continue;
++ if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
++ continue;
++
++ if (is_add) {
++ ret = cgroup_add_file(cgrp, cft);
++ if (ret) {
++ pr_warn("%s: failed to add %s, err=%d\n",
++ __func__, cft->name, ret);
++ return ret;
++ }
++ } else {
++ cgroup_rm_file(cgrp, cft);
++ }
++ }
++ return 0;
++}
++
++static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
++{
++ LIST_HEAD(pending);
++ struct cgroup_subsys *ss = cfts[0].ss;
++ struct cgroup *root = &ss->root->cgrp;
++ struct cgroup_subsys_state *css;
++ int ret = 0;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ /* add/rm files for all cgroups created before */
++ css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
++ struct cgroup *cgrp = css->cgroup;
++
++ if (cgroup_is_dead(cgrp))
++ continue;
++
++ ret = cgroup_addrm_files(cgrp, cfts, is_add);
++ if (ret)
++ break;
++ }
++
++ if (is_add && !ret)
++ kernfs_activate(root->kn);
++ return ret;
++}
++
++static void cgroup_exit_cftypes(struct cftype *cfts)
++{
++ struct cftype *cft;
++
++ for (cft = cfts; cft->name[0] != '\0'; cft++) {
++ /* free copy for custom atomic_write_len, see init_cftypes() */
++ if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
++ kfree(cft->kf_ops);
++ cft->kf_ops = NULL;
++ cft->ss = NULL;
++
++ /* revert flags set by cgroup core while adding @cfts */
++ cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
++ }
++}
++
++static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
++{
++ struct cftype *cft;
++
++ for (cft = cfts; cft->name[0] != '\0'; cft++) {
++ struct kernfs_ops *kf_ops;
++
++ WARN_ON(cft->ss || cft->kf_ops);
++
++ if (cft->seq_start)
++ kf_ops = &cgroup_kf_ops;
++ else
++ kf_ops = &cgroup_kf_single_ops;
++
++ /*
++ * Ugh... if @cft wants a custom max_write_len, we need to
++ * make a copy of kf_ops to set its atomic_write_len.
++ */
++ if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
++ kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
++ if (!kf_ops) {
++ cgroup_exit_cftypes(cfts);
++ return -ENOMEM;
++ }
++ kf_ops->atomic_write_len = cft->max_write_len;
++ }
++
++ cft->kf_ops = kf_ops;
++ cft->ss = ss;
++ }
++
++ return 0;
++}
++
++static int cgroup_rm_cftypes_locked(struct cftype *cfts)
++{
++ lockdep_assert_held(&cgroup_mutex);
++
++ if (!cfts || !cfts[0].ss)
++ return -ENOENT;
++
++ list_del(&cfts->node);
++ cgroup_apply_cftypes(cfts, false);
++ cgroup_exit_cftypes(cfts);
++ return 0;
++}
++
++/**
++ * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
++ * @cfts: zero-length name terminated array of cftypes
++ *
++ * Unregister @cfts. Files described by @cfts are removed from all
++ * existing cgroups and all future cgroups won't have them either. This
++ * function can be called anytime whether @cfts' subsys is attached or not.
++ *
++ * Returns 0 on successful unregistration, -ENOENT if @cfts is not
++ * registered.
++ */
++int cgroup_rm_cftypes(struct cftype *cfts)
++{
++ int ret;
++
++ mutex_lock(&cgroup_mutex);
++ ret = cgroup_rm_cftypes_locked(cfts);
++ mutex_unlock(&cgroup_mutex);
++ return ret;
++}
++
++/**
++ * cgroup_add_cftypes - add an array of cftypes to a subsystem
++ * @ss: target cgroup subsystem
++ * @cfts: zero-length name terminated array of cftypes
++ *
++ * Register @cfts to @ss. Files described by @cfts are created for all
++ * existing cgroups to which @ss is attached and all future cgroups will
++ * have them too. This function can be called anytime whether @ss is
++ * attached or not.
++ *
++ * Returns 0 on successful registration, -errno on failure. Note that this
++ * function currently returns 0 as long as @cfts registration is successful
++ * even if some file creation attempts on existing cgroups fail.
++ */
++static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
++{
++ int ret;
++
++ if (ss->disabled)
++ return 0;
++
++ if (!cfts || cfts[0].name[0] == '\0')
++ return 0;
++
++ ret = cgroup_init_cftypes(ss, cfts);
++ if (ret)
++ return ret;
++
++ mutex_lock(&cgroup_mutex);
++
++ list_add_tail(&cfts->node, &ss->cfts);
++ ret = cgroup_apply_cftypes(cfts, true);
++ if (ret)
++ cgroup_rm_cftypes_locked(cfts);
++
++ mutex_unlock(&cgroup_mutex);
++ return ret;
++}
++
++/**
++ * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
++ * @ss: target cgroup subsystem
++ * @cfts: zero-length name terminated array of cftypes
++ *
++ * Similar to cgroup_add_cftypes() but the added files are only used for
++ * the default hierarchy.
++ */
++int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
++{
++ struct cftype *cft;
++
++ for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
++ cft->flags |= __CFTYPE_ONLY_ON_DFL;
++ return cgroup_add_cftypes(ss, cfts);
++}
++
++/**
++ * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
++ * @ss: target cgroup subsystem
++ * @cfts: zero-length name terminated array of cftypes
++ *
++ * Similar to cgroup_add_cftypes() but the added files are only used for
++ * the legacy hierarchies.
++ */
++int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
++{
++ struct cftype *cft;
++
++ /*
++ * If legacy_files_on_dfl, we want to show the legacy files on the
++ * dfl hierarchy but iff the target subsystem hasn't been updated
++ * for the dfl hierarchy yet.
++ */
++ if (!cgroup_legacy_files_on_dfl ||
++ ss->dfl_cftypes != ss->legacy_cftypes) {
++ for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
++ cft->flags |= __CFTYPE_NOT_ON_DFL;
++ }
++
++ return cgroup_add_cftypes(ss, cfts);
++}
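++
++/*
++ * Registration sketch (illustrative only): a controller typically declares
++ * a zero-terminated cftype array and registers it once from its init path.
++ * "foo_cgrp_subsys", "foo_weight_read" and "foo_weight_write" below are
++ * hypothetical placeholders, not symbols defined in this file.
++ *
++ *	static struct cftype foo_files[] = {
++ *		{
++ *			.name = "foo.weight",
++ *			.read_u64 = foo_weight_read,
++ *			.write_u64 = foo_weight_write,
++ *		},
++ *		{ }	(zero-length name terminates the array)
++ *	};
++ *
++ *	WARN_ON(cgroup_add_legacy_cftypes(&foo_cgrp_subsys, foo_files));
++ */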
++
++/**
++ * cgroup_task_count - count the number of tasks in a cgroup.
++ * @cgrp: the cgroup in question
++ *
++ * Return the number of tasks in the cgroup.
++ */
++static int cgroup_task_count(const struct cgroup *cgrp)
++{
++ int count = 0;
++ struct cgrp_cset_link *link;
++
++ down_read(&css_set_rwsem);
++ list_for_each_entry(link, &cgrp->cset_links, cset_link)
++ count += atomic_read(&link->cset->refcount);
++ up_read(&css_set_rwsem);
++ return count;
++}
++
++/**
++ * css_next_child - find the next child of a given css
++ * @pos: the current position (%NULL to initiate traversal)
++ * @parent: css whose children to walk
++ *
++ * This function returns the next child of @parent and should be called
++ * under either cgroup_mutex or RCU read lock. The only requirement is
++ * that @parent and @pos are accessible. The next sibling is guaranteed to
++ * be returned regardless of their states.
++ *
++ * If a subsystem synchronizes ->css_online() and the start of iteration, a
++ * css which finished ->css_online() is guaranteed to be visible in the
++ * future iterations and will stay visible until the last reference is put.
++ * A css which hasn't finished ->css_online() or already finished
++ * ->css_offline() may show up during traversal. It's each subsystem's
++ * responsibility to synchronize against on/offlining.
++ */
++struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
++ struct cgroup_subsys_state *parent)
++{
++ struct cgroup_subsys_state *next;
++
++ cgroup_assert_mutex_or_rcu_locked();
++
++ /*
++ * @pos could already have been unlinked from the sibling list.
++ * Once a cgroup is removed, its ->sibling.next is no longer
++ * updated when its next sibling changes. CSS_RELEASED is set when
++ * @pos is taken off list, at which time its next pointer is valid,
++ * and, as releases are serialized, the one pointed to by the next
++ * pointer is guaranteed to not have started release yet. This
++ * implies that if we observe !CSS_RELEASED on @pos in this RCU
++ * critical section, the one pointed to by its next pointer is
++ * guaranteed to not have finished its RCU grace period even if we
++ * have dropped rcu_read_lock() in between iterations.
++ *
++ * If @pos has CSS_RELEASED set, its next pointer can't be
++ * dereferenced; however, as each css is given a monotonically
++ * increasing unique serial number and always appended to the
++ * sibling list, the next one can be found by walking the parent's
++ * children until the first css with higher serial number than
++ * @pos's. While this path can be slower, it happens iff iteration
++ * races against release and the race window is very small.
++ */
++ if (!pos) {
++ next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
++ } else if (likely(!(pos->flags & CSS_RELEASED))) {
++ next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
++ } else {
++ list_for_each_entry_rcu(next, &parent->children, sibling)
++ if (next->serial_nr > pos->serial_nr)
++ break;
++ }
++
++ /*
++ * @next, if not pointing to the head, can be dereferenced and is
++ * the next sibling.
++ */
++ if (&next->sibling != &parent->children)
++ return next;
++ return NULL;
++}
++
++/**
++ * css_next_descendant_pre - find the next descendant for pre-order walk
++ * @pos: the current position (%NULL to initiate traversal)
++ * @root: css whose descendants to walk
++ *
++ * To be used by css_for_each_descendant_pre(). Find the next descendant
++ * to visit for pre-order traversal of @root's descendants. @root is
++ * included in the iteration and the first node to be visited.
++ *
++ * While this function requires cgroup_mutex or RCU read locking, it
++ * doesn't require the whole traversal to be contained in a single critical
++ * section. This function will return the correct next descendant as long
++ * as both @pos and @root are accessible and @pos is a descendant of @root.
++ *
++ * If a subsystem synchronizes ->css_online() and the start of iteration, a
++ * css which finished ->css_online() is guaranteed to be visible in the
++ * future iterations and will stay visible until the last reference is put.
++ * A css which hasn't finished ->css_online() or already finished
++ * ->css_offline() may show up during traversal. It's each subsystem's
++ * responsibility to synchronize against on/offlining.
++ */
++struct cgroup_subsys_state *
++css_next_descendant_pre(struct cgroup_subsys_state *pos,
++ struct cgroup_subsys_state *root)
++{
++ struct cgroup_subsys_state *next;
++
++ cgroup_assert_mutex_or_rcu_locked();
++
++ /* if first iteration, visit @root */
++ if (!pos)
++ return root;
++
++ /* visit the first child if exists */
++ next = css_next_child(NULL, pos);
++ if (next)
++ return next;
++
++ /* no child, visit my or the closest ancestor's next sibling */
++ while (pos != root) {
++ next = css_next_child(pos, pos->parent);
++ if (next)
++ return next;
++ pos = pos->parent;
++ }
++
++ return NULL;
++}
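++
++/*
++ * Walk sketch (illustrative only): a pre-order traversal of @root's
++ * subtree under RCU, as required above; visit_css() is a hypothetical
++ * placeholder for whatever per-css work the caller wants to do.
++ *
++ *	struct cgroup_subsys_state *pos;
++ *
++ *	rcu_read_lock();
++ *	css_for_each_descendant_pre(pos, root)
++ *		visit_css(pos);
++ *	rcu_read_unlock();
++ */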
++
++/**
++ * css_rightmost_descendant - return the rightmost descendant of a css
++ * @pos: css of interest
++ *
++ * Return the rightmost descendant of @pos. If there's no descendant, @pos
++ * is returned. This can be used during pre-order traversal to skip
++ * subtree of @pos.
++ *
++ * While this function requires cgroup_mutex or RCU read locking, it
++ * doesn't require the whole traversal to be contained in a single critical
++ * section. This function will return the correct rightmost descendant as
++ * long as @pos is accessible.
++ */
++struct cgroup_subsys_state *
++css_rightmost_descendant(struct cgroup_subsys_state *pos)
++{
++ struct cgroup_subsys_state *last, *tmp;
++
++ cgroup_assert_mutex_or_rcu_locked();
++
++ do {
++ last = pos;
++ /* ->prev isn't RCU safe, walk ->next till the end */
++ pos = NULL;
++ css_for_each_child(tmp, last)
++ pos = tmp;
++ } while (pos);
++
++ return last;
++}
++
++static struct cgroup_subsys_state *
++css_leftmost_descendant(struct cgroup_subsys_state *pos)
++{
++ struct cgroup_subsys_state *last;
++
++ do {
++ last = pos;
++ pos = css_next_child(NULL, pos);
++ } while (pos);
++
++ return last;
++}
++
++/**
++ * css_next_descendant_post - find the next descendant for post-order walk
++ * @pos: the current position (%NULL to initiate traversal)
++ * @root: css whose descendants to walk
++ *
++ * To be used by css_for_each_descendant_post(). Find the next descendant
++ * to visit for post-order traversal of @root's descendants. @root is
++ * included in the iteration and the last node to be visited.
++ *
++ * While this function requires cgroup_mutex or RCU read locking, it
++ * doesn't require the whole traversal to be contained in a single critical
++ * section. This function will return the correct next descendant as long
++ * as both @pos and @root are accessible and @pos is a descendant of
++ * @root.
++ *
++ * If a subsystem synchronizes ->css_online() and the start of iteration, a
++ * css which finished ->css_online() is guaranteed to be visible in the
++ * future iterations and will stay visible until the last reference is put.
++ * A css which hasn't finished ->css_online() or already finished
++ * ->css_offline() may show up during traversal. It's each subsystem's
++ * responsibility to synchronize against on/offlining.
++ */
++struct cgroup_subsys_state *
++css_next_descendant_post(struct cgroup_subsys_state *pos,
++ struct cgroup_subsys_state *root)
++{
++ struct cgroup_subsys_state *next;
++
++ cgroup_assert_mutex_or_rcu_locked();
++
++ /* if first iteration, visit leftmost descendant which may be @root */
++ if (!pos)
++ return css_leftmost_descendant(root);
++
++ /* if we visited @root, we're done */
++ if (pos == root)
++ return NULL;
++
++ /* if there's an unvisited sibling, visit its leftmost descendant */
++ next = css_next_child(pos, pos->parent);
++ if (next)
++ return css_leftmost_descendant(next);
++
++ /* no sibling left, visit parent */
++ return pos->parent;
++}
++
++/**
++ * css_has_online_children - does a css have online children
++ * @css: the target css
++ *
++ * Returns %true if @css has any online children; otherwise, %false. This
++ * function can be called from any context but the caller is responsible
++ * for synchronizing against on/offlining as necessary.
++ */
++bool css_has_online_children(struct cgroup_subsys_state *css)
++{
++ struct cgroup_subsys_state *child;
++ bool ret = false;
++
++ rcu_read_lock();
++ css_for_each_child(child, css) {
++ if (child->flags & CSS_ONLINE) {
++ ret = true;
++ break;
++ }
++ }
++ rcu_read_unlock();
++ return ret;
++}
++
++/**
++ * css_advance_task_iter - advance a task iterator to the next css_set
++ * @it: the iterator to advance
++ *
++ * Advance @it to the next css_set to walk.
++ */
++static void css_advance_task_iter(struct css_task_iter *it)
++{
++ struct list_head *l = it->cset_pos;
++ struct cgrp_cset_link *link;
++ struct css_set *cset;
++
++ /* Advance to the next non-empty css_set */
++ do {
++ l = l->next;
++ if (l == it->cset_head) {
++ it->cset_pos = NULL;
++ return;
++ }
++
++ if (it->ss) {
++ cset = container_of(l, struct css_set,
++ e_cset_node[it->ss->id]);
++ } else {
++ link = list_entry(l, struct cgrp_cset_link, cset_link);
++ cset = link->cset;
++ }
++ } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks));
++
++ it->cset_pos = l;
++
++ if (!list_empty(&cset->tasks))
++ it->task_pos = cset->tasks.next;
++ else
++ it->task_pos = cset->mg_tasks.next;
++
++ it->tasks_head = &cset->tasks;
++ it->mg_tasks_head = &cset->mg_tasks;
++}
++
++/**
++ * css_task_iter_start - initiate task iteration
++ * @css: the css to walk tasks of
++ * @it: the task iterator to use
++ *
++ * Initiate iteration through the tasks of @css. The caller can call
++ * css_task_iter_next() to walk through the tasks until the function
++ * returns NULL. On completion of iteration, css_task_iter_end() must be
++ * called.
++ *
++ * Note that this function acquires a lock which is released when the
++ * iteration finishes. The caller can't sleep while iteration is in
++ * progress.
++ */
++void css_task_iter_start(struct cgroup_subsys_state *css,
++ struct css_task_iter *it)
++ __acquires(css_set_rwsem)
++{
++ /* no one should try to iterate before mounting cgroups */
++ WARN_ON_ONCE(!use_task_css_set_links);
++
++ down_read(&css_set_rwsem);
++
++ it->ss = css->ss;
++
++ if (it->ss)
++ it->cset_pos = &css->cgroup->e_csets[css->ss->id];
++ else
++ it->cset_pos = &css->cgroup->cset_links;
++
++ it->cset_head = it->cset_pos;
++
++ css_advance_task_iter(it);
++}
++
++/**
++ * css_task_iter_next - return the next task for the iterator
++ * @it: the task iterator being iterated
++ *
++ * The "next" function for task iteration. @it should have been
++ * initialized via css_task_iter_start(). Returns NULL when the iteration
++ * reaches the end.
++ */
++struct task_struct *css_task_iter_next(struct css_task_iter *it)
++{
++ struct task_struct *res;
++ struct list_head *l = it->task_pos;
++
++ /* If the iterator's css_set position is NULL, we have no tasks */
++ if (!it->cset_pos)
++ return NULL;
++ res = list_entry(l, struct task_struct, cg_list);
++
++ /*
++ * Advance iterator to find next entry. cset->tasks is consumed
++ * first and then ->mg_tasks. After ->mg_tasks, we move onto the
++ * next cset.
++ */
++ l = l->next;
++
++ if (l == it->tasks_head)
++ l = it->mg_tasks_head->next;
++
++ if (l == it->mg_tasks_head)
++ css_advance_task_iter(it);
++ else
++ it->task_pos = l;
++
++ return res;
++}
++
++/**
++ * css_task_iter_end - finish task iteration
++ * @it: the task iterator to finish
++ *
++ * Finish task iteration started by css_task_iter_start().
++ */
++void css_task_iter_end(struct css_task_iter *it)
++ __releases(css_set_rwsem)
++{
++ up_read(&css_set_rwsem);
++}
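++
++/*
++ * Iteration sketch (illustrative only): walking every task of a css with
++ * the start/next/end triplet above. The caller may not sleep between
++ * start and end because css_set_rwsem is held; handle_task() is a
++ * hypothetical placeholder.
++ *
++ *	struct css_task_iter it;
++ *	struct task_struct *task;
++ *
++ *	css_task_iter_start(css, &it);
++ *	while ((task = css_task_iter_next(&it)))
++ *		handle_task(task);
++ *	css_task_iter_end(&it);
++ */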
++
++/**
++ * cgroup_transfer_tasks - move tasks from one cgroup to another
++ * @to: cgroup to which the tasks will be moved
++ * @from: cgroup in which the tasks currently reside
++ *
++ * Locking rules between cgroup_post_fork() and the migration path
++ * guarantee that, if a task is forking while being migrated, the new child
++ * is guaranteed to be either visible in the source cgroup after the
++ * parent's migration is complete or put into the target cgroup. No task
++ * can slip out of migration through forking.
++ */
++int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
++{
++ LIST_HEAD(preloaded_csets);
++ struct cgrp_cset_link *link;
++ struct css_task_iter it;
++ struct task_struct *task;
++ int ret;
++
++ mutex_lock(&cgroup_mutex);
++
++ /* all tasks in @from are being moved, all csets are source */
++ down_read(&css_set_rwsem);
++ list_for_each_entry(link, &from->cset_links, cset_link)
++ cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
++ up_read(&css_set_rwsem);
++
++ ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
++ if (ret)
++ goto out_err;
++
++ /*
++ * Migrate tasks one-by-one until @from is empty. This fails iff
++ * ->can_attach() fails.
++ */
++ do {
++ css_task_iter_start(&from->self, &it);
++ task = css_task_iter_next(&it);
++ if (task)
++ get_task_struct(task);
++ css_task_iter_end(&it);
++
++ if (task) {
++ ret = cgroup_migrate(to, task, false);
++ put_task_struct(task);
++ }
++ } while (task && !ret);
++out_err:
++ cgroup_migrate_finish(&preloaded_csets);
++ mutex_unlock(&cgroup_mutex);
++ return ret;
++}
++
++/*
++ * Stuff for reading the 'tasks'/'procs' files.
++ *
++ * Reading this file can return large amounts of data if a cgroup has
++ * *lots* of attached tasks. So it may need several calls to read(),
++ * but we cannot guarantee that the information we produce is correct
++ * unless we produce it entirely atomically.
++ *
++ */
++
++/* which pidlist file are we talking about? */
++enum cgroup_filetype {
++ CGROUP_FILE_PROCS,
++ CGROUP_FILE_TASKS,
++};
++
++/*
++ * A pidlist is a list of pids that virtually represents the contents of one
++ * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
++ * a pair (one each for procs, tasks) for each pid namespace that's relevant
++ * to the cgroup.
++ */
++struct cgroup_pidlist {
++ /*
++ * used to find which pidlist is wanted. doesn't change as long as
++ * this particular list stays in the list.
++ */
++ struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
++ /* array of xids */
++ pid_t *list;
++ /* how many elements the above list has */
++ int length;
++ /* each of these stored in a list by its cgroup */
++ struct list_head links;
++ /* pointer to the cgroup we belong to, for list removal purposes */
++ struct cgroup *owner;
++ /* for delayed destruction */
++ struct delayed_work destroy_dwork;
++};
++
++/*
++ * The following two functions "fix" the issue where there are more pids
++ * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
++ * TODO: replace with a kernel-wide solution to this problem
++ */
++#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
++static void *pidlist_allocate(int count)
++{
++ if (PIDLIST_TOO_LARGE(count))
++ return vmalloc(count * sizeof(pid_t));
++ else
++ return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
++}
++
++static void pidlist_free(void *p)
++{
++ kvfree(p);
++}
++
++/*
++ * Used to destroy all pidlists lingering waiting for destroy timer. None
++ * should be left afterwards.
++ */
++static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
++{
++ struct cgroup_pidlist *l, *tmp_l;
++
++ mutex_lock(&cgrp->pidlist_mutex);
++ list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
++ mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
++ mutex_unlock(&cgrp->pidlist_mutex);
++
++ flush_workqueue(cgroup_pidlist_destroy_wq);
++ BUG_ON(!list_empty(&cgrp->pidlists));
++}
++
++static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
++{
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
++ destroy_dwork);
++ struct cgroup_pidlist *tofree = NULL;
++
++ mutex_lock(&l->owner->pidlist_mutex);
++
++ /*
++ * Destroy iff we didn't get queued again. The state won't change
++ * as destroy_dwork can only be queued while locked.
++ */
++ if (!delayed_work_pending(dwork)) {
++ list_del(&l->links);
++ pidlist_free(l->list);
++ put_pid_ns(l->key.ns);
++ tofree = l;
++ }
++
++ mutex_unlock(&l->owner->pidlist_mutex);
++ kfree(tofree);
++}
++
++/*
++ * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
++ * Returns the number of unique elements.
++ */
++static int pidlist_uniq(pid_t *list, int length)
++{
++ int src, dest = 1;
++
++ /*
++ * we presume the 0th element is unique, so i starts at 1. trivial
++ * edge cases first; no work needs to be done for either
++ */
++ if (length == 0 || length == 1)
++ return length;
++ /* src and dest walk down the list; dest counts unique elements */
++ for (src = 1; src < length; src++) {
++ /* find next unique element */
++ while (list[src] == list[src-1]) {
++ src++;
++ if (src == length)
++ goto after;
++ }
++ /* dest always points to where the next unique element goes */
++ list[dest] = list[src];
++ dest++;
++ }
++after:
++ return dest;
++}
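++
++/*
++ * Worked example: for the sorted input {3, 3, 5, 7, 7} the loop above
++ * compacts the array in place to {3, 5, 7, ...} and returns 3; entries
++ * past the returned length are left stale and must not be read.
++ */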
++
++/*
++ * The two pid files - tasks and cgroup.procs - guaranteed that the result
++ * is sorted, which forced this whole pidlist fiasco. As pid order is
++ * different per namespace, each namespace needs differently sorted list,
++ * making it impossible to use, for example, single rbtree of member tasks
++ * sorted by task pointer. As pidlists can be fairly large, allocating one
++ * per open file is dangerous, so cgroup had to implement shared pool of
++ * pidlists keyed by cgroup and namespace.
++ *
++ * All this extra complexity was caused by the original implementation
++ * committing to an entirely unnecessary property. In the long term, we
++ * want to do away with it. Explicitly scramble sort order if on the
++ * default hierarchy so that no such expectation exists in the new
++ * interface.
++ *
++ * Scrambling is done by swapping every two consecutive bits, which is
++ * non-identity one-to-one mapping which disturbs sort order sufficiently.
++ */
++static pid_t pid_fry(pid_t pid)
++{
++ unsigned a = pid & 0x55555555;
++ unsigned b = pid & 0xAAAAAAAA;
++
++ return (a << 1) | (b >> 1);
++}
++
++static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
++{
++ if (cgroup_on_dfl(cgrp))
++ return pid_fry(pid);
++ else
++ return pid;
++}
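++
++/*
++ * Worked example of the swap above: for pid 5 (0b0101), a = 0b0101 and
++ * b = 0, so pid_fry(5) = (0b0101 << 1) | 0 = 0b1010 = 10; conversely
++ * pid_fry(10) = 5. Swapping each even/odd bit pair is its own inverse,
++ * so applying the mapping twice recovers the original pid.
++ */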
++
++static int cmppid(const void *a, const void *b)
++{
++ return *(pid_t *)a - *(pid_t *)b;
++}
++
++static int fried_cmppid(const void *a, const void *b)
++{
++ return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
++}
++
++static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
++ enum cgroup_filetype type)
++{
++ struct cgroup_pidlist *l;
++ /* don't need task_nsproxy() if we're looking at ourself */
++ struct pid_namespace *ns = task_active_pid_ns(current);
++
++ lockdep_assert_held(&cgrp->pidlist_mutex);
++
++ list_for_each_entry(l, &cgrp->pidlists, links)
++ if (l->key.type == type && l->key.ns == ns)
++ return l;
++ return NULL;
++}
++
++/*
++ * find the appropriate pidlist for our purpose (given procs vs tasks)
++ * returns with the lock on that pidlist already held, and takes care
++ * of the use count, or returns NULL with no locks held if we're out of
++ * memory.
++ */
++static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
++ enum cgroup_filetype type)
++{
++ struct cgroup_pidlist *l;
++
++ lockdep_assert_held(&cgrp->pidlist_mutex);
++
++ l = cgroup_pidlist_find(cgrp, type);
++ if (l)
++ return l;
++
++ /* entry not found; create a new one */
++ l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
++ if (!l)
++ return l;
++
++ INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
++ l->key.type = type;
++ /* don't need task_nsproxy() if we're looking at ourself */
++ l->key.ns = get_pid_ns(task_active_pid_ns(current));
++ l->owner = cgrp;
++ list_add(&l->links, &cgrp->pidlists);
++ return l;
++}
++
++/*
++ * Load a cgroup's pidarray with either procs' tgids or tasks' pids
++ */
++static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
++ struct cgroup_pidlist **lp)
++{
++ pid_t *array;
++ int length;
++ int pid, n = 0; /* used for populating the array */
++ struct css_task_iter it;
++ struct task_struct *tsk;
++ struct cgroup_pidlist *l;
++
++ lockdep_assert_held(&cgrp->pidlist_mutex);
++
++ /*
++ * If cgroup gets more users after we read count, we won't have
++ * enough space - tough. This race is indistinguishable to the
++ * caller from the case that the additional cgroup users didn't
++ * show up until sometime later on.
++ */
++ length = cgroup_task_count(cgrp);
++ array = pidlist_allocate(length);
++ if (!array)
++ return -ENOMEM;
++ /* now, populate the array */
++ css_task_iter_start(&cgrp->self, &it);
++ while ((tsk = css_task_iter_next(&it))) {
++ if (unlikely(n == length))
++ break;
++ /* get tgid or pid for procs or tasks file respectively */
++ if (type == CGROUP_FILE_PROCS)
++ pid = task_tgid_vnr(tsk);
++ else
++ pid = task_pid_vnr(tsk);
++ if (pid > 0) /* make sure to only use valid results */
++ array[n++] = pid;
++ }
++ css_task_iter_end(&it);
++ length = n;
++ /* now sort & (if procs) strip out duplicates */
++ if (cgroup_on_dfl(cgrp))
++ sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
++ else
++ sort(array, length, sizeof(pid_t), cmppid, NULL);
++ if (type == CGROUP_FILE_PROCS)
++ length = pidlist_uniq(array, length);
++
++ l = cgroup_pidlist_find_create(cgrp, type);
++ if (!l) {
++ pidlist_free(array);
++ return -ENOMEM;
++ }
++
++ /* store array, freeing old if necessary */
++ pidlist_free(l->list);
++ l->list = array;
++ l->length = length;
++ *lp = l;
++ return 0;
++}
++
++/**
++ * cgroupstats_build - build and fill cgroupstats
++ * @stats: cgroupstats to fill information into
++ * @dentry: A dentry entry belonging to the cgroup for which stats have
++ * been requested.
++ *
++ * Build and fill cgroupstats so that taskstats can export it to user
++ * space.
++ */
++int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
++{
++ struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
++ struct cgroup *cgrp;
++ struct css_task_iter it;
++ struct task_struct *tsk;
++
++ /* it should be kernfs_node belonging to cgroupfs and is a directory */
++ if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
++ kernfs_type(kn) != KERNFS_DIR)
++ return -EINVAL;
++
++ mutex_lock(&cgroup_mutex);
++
++ /*
++ * We aren't being called from kernfs and there's no guarantee on
++ * @kn->priv's validity. For this and css_tryget_online_from_dir(),
++ * @kn->priv is RCU safe. Let's do the RCU dancing.
++ */
++ rcu_read_lock();
++ cgrp = rcu_dereference(kn->priv);
++ if (!cgrp || cgroup_is_dead(cgrp)) {
++ rcu_read_unlock();
++ mutex_unlock(&cgroup_mutex);
++ return -ENOENT;
++ }
++ rcu_read_unlock();
++
++ css_task_iter_start(&cgrp->self, &it);
++ while ((tsk = css_task_iter_next(&it))) {
++ switch (tsk->state) {
++ case TASK_RUNNING:
++ stats->nr_running++;
++ break;
++ case TASK_INTERRUPTIBLE:
++ stats->nr_sleeping++;
++ break;
++ case TASK_UNINTERRUPTIBLE:
++ stats->nr_uninterruptible++;
++ break;
++ case TASK_STOPPED:
++ stats->nr_stopped++;
++ break;
++ default:
++ if (delayacct_is_task_waiting_on_io(tsk))
++ stats->nr_io_wait++;
++ break;
++ }
++ }
++ css_task_iter_end(&it);
++
++ mutex_unlock(&cgroup_mutex);
++ return 0;
++}
++
++
++/*
++ * seq_file methods for the tasks/procs files. The seq_file position is the
++ * next pid to display; the seq_file iterator is a pointer to the pid
++ * in the cgroup->l->list array.
++ */
++
++static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
++{
++ /*
++ * Initially we receive a position value that corresponds to
++ * one more than the last pid shown (or 0 on the first call or
++ * after a seek to the start). Use a binary-search to find the
++ * next pid to display, if any
++ */
++ struct kernfs_open_file *of = s->private;
++ struct cgroup *cgrp = seq_css(s)->cgroup;
++ struct cgroup_pidlist *l;
++ enum cgroup_filetype type = seq_cft(s)->private;
++ int index = 0, pid = *pos;
++ int *iter, ret;
++
++ mutex_lock(&cgrp->pidlist_mutex);
++
++ /*
++ * !NULL @of->priv indicates that this isn't the first start()
++ * after open. If the matching pidlist is around, we can use that.
++ * Look for it. Note that @of->priv can't be used directly. It
++ * could already have been destroyed.
++ */
++ if (of->priv)
++ of->priv = cgroup_pidlist_find(cgrp, type);
++
++ /*
++ * Either this is the first start() after open or the matching
++ * pidlist has been destroyed in between. Create a new one.
++ */
++ if (!of->priv) {
++ ret = pidlist_array_load(cgrp, type,
++ (struct cgroup_pidlist **)&of->priv);
++ if (ret)
++ return ERR_PTR(ret);
++ }
++ l = of->priv;
++
++ if (pid) {
++ int end = l->length;
++
++ while (index < end) {
++ int mid = (index + end) / 2;
++ if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
++ index = mid;
++ break;
++ } else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
++ index = mid + 1;
++ else
++ end = mid;
++ }
++ }
++ /* If we're off the end of the array, we're done */
++ if (index >= l->length)
++ return NULL;
++ /* Update the abstract position to be the actual pid that we found */
++ iter = l->list + index;
++ *pos = cgroup_pid_fry(cgrp, *iter);
++ return iter;
++}
++
++static void cgroup_pidlist_stop(struct seq_file *s, void *v)
++{
++ struct kernfs_open_file *of = s->private;
++ struct cgroup_pidlist *l = of->priv;
++
++ if (l)
++ mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
++ CGROUP_PIDLIST_DESTROY_DELAY);
++ mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
++}
++
++static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
++{
++ struct kernfs_open_file *of = s->private;
++ struct cgroup_pidlist *l = of->priv;
++ pid_t *p = v;
++ pid_t *end = l->list + l->length;
++ /*
++ * Advance to the next pid in the array. If this goes off the
++ * end, we're done
++ */
++ p++;
++ if (p >= end) {
++ return NULL;
++ } else {
++ *pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
++ return p;
++ }
++}
++
++static int cgroup_pidlist_show(struct seq_file *s, void *v)
++{
++ seq_printf(s, "%d\n", *(int *)v);
++
++ return 0;
++}
++
++static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return notify_on_release(css->cgroup);
++}
++
++static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
++ struct cftype *cft, u64 val)
++{
++ if (val)
++ set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
++ else
++ clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
++ return 0;
++}
++
++static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
++}
++
++static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
++ struct cftype *cft, u64 val)
++{
++ if (val)
++ set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
++ else
++ clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
++ return 0;
++}
++
++/* cgroup core interface files for the default hierarchy */
++static struct cftype cgroup_dfl_base_files[] = {
++ {
++ .name = "cgroup.procs",
++ .seq_start = cgroup_pidlist_start,
++ .seq_next = cgroup_pidlist_next,
++ .seq_stop = cgroup_pidlist_stop,
++ .seq_show = cgroup_pidlist_show,
++ .private = CGROUP_FILE_PROCS,
++ .write = cgroup_procs_write,
++ .mode = S_IRUGO | S_IWUSR,
++ },
++ {
++ .name = "cgroup.controllers",
++ .flags = CFTYPE_ONLY_ON_ROOT,
++ .seq_show = cgroup_root_controllers_show,
++ },
++ {
++ .name = "cgroup.controllers",
++ .flags = CFTYPE_NOT_ON_ROOT,
++ .seq_show = cgroup_controllers_show,
++ },
++ {
++ .name = "cgroup.subtree_control",
++ .seq_show = cgroup_subtree_control_show,
++ .write = cgroup_subtree_control_write,
++ },
++ {
++ .name = "cgroup.populated",
++ .flags = CFTYPE_NOT_ON_ROOT,
++ .seq_show = cgroup_populated_show,
++ },
++ { } /* terminate */
++};
++
++/* cgroup core interface files for the legacy hierarchies */
++static struct cftype cgroup_legacy_base_files[] = {
++ {
++ .name = "cgroup.procs",
++ .seq_start = cgroup_pidlist_start,
++ .seq_next = cgroup_pidlist_next,
++ .seq_stop = cgroup_pidlist_stop,
++ .seq_show = cgroup_pidlist_show,
++ .private = CGROUP_FILE_PROCS,
++ .write = cgroup_procs_write,
++ .mode = S_IRUGO | S_IWUSR,
++ },
++ {
++ .name = "cgroup.clone_children",
++ .read_u64 = cgroup_clone_children_read,
++ .write_u64 = cgroup_clone_children_write,
++ },
++ {
++ .name = "cgroup.sane_behavior",
++ .flags = CFTYPE_ONLY_ON_ROOT,
++ .seq_show = cgroup_sane_behavior_show,
++ },
++ {
++ .name = "tasks",
++ .seq_start = cgroup_pidlist_start,
++ .seq_next = cgroup_pidlist_next,
++ .seq_stop = cgroup_pidlist_stop,
++ .seq_show = cgroup_pidlist_show,
++ .private = CGROUP_FILE_TASKS,
++ .write = cgroup_tasks_write,
++ .mode = S_IRUGO | S_IWUSR,
++ },
++ {
++ .name = "notify_on_release",
++ .read_u64 = cgroup_read_notify_on_release,
++ .write_u64 = cgroup_write_notify_on_release,
++ },
++ {
++ .name = "release_agent",
++ .flags = CFTYPE_ONLY_ON_ROOT,
++ .seq_show = cgroup_release_agent_show,
++ .write = cgroup_release_agent_write,
++ .max_write_len = PATH_MAX - 1,
++ },
++ { } /* terminate */
++};
++
++/**
++ * cgroup_populate_dir - create subsys files in a cgroup directory
++ * @cgrp: target cgroup
++ * @subsys_mask: mask of the subsystem ids whose files should be added
++ *
++ * On failure, no file is added.
++ */
++static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask)
++{
++ struct cgroup_subsys *ss;
++ int i, ret = 0;
++
++ /* process cftsets of each subsystem */
++ for_each_subsys(ss, i) {
++ struct cftype *cfts;
++
++ if (!(subsys_mask & (1 << i)))
++ continue;
++
++ list_for_each_entry(cfts, &ss->cfts, node) {
++ ret = cgroup_addrm_files(cgrp, cfts, true);
++ if (ret < 0)
++ goto err;
++ }
++ }
++ return 0;
++err:
++ cgroup_clear_dir(cgrp, subsys_mask);
++ return ret;
++}
++
++/*
++ * css destruction is four-stage process.
++ *
++ * 1. Destruction starts. Killing of the percpu_ref is initiated.
++ * Implemented in kill_css().
++ *
++ * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
++ * and thus css_tryget_online() is guaranteed to fail, the css can be
++ * offlined by invoking offline_css(). After offlining, the base ref is
++ * put. Implemented in css_killed_work_fn().
++ *
++ * 3. When the percpu_ref reaches zero, the only possible remaining
++ * accessors are inside RCU read sections. css_release() schedules the
++ * RCU callback.
++ *
++ * 4. After the grace period, the css can be freed. Implemented in
++ * css_free_work_fn().
++ *
++ * It is actually hairier because both steps 2 and 4 require process context
++ * and thus involve punting to css->destroy_work adding two additional
++ * steps to the already complex sequence.
++ */
++static void css_free_work_fn(struct work_struct *work)
++{
++ struct cgroup_subsys_state *css =
++ container_of(work, struct cgroup_subsys_state, destroy_work);
++ struct cgroup_subsys *ss = css->ss;
++ struct cgroup *cgrp = css->cgroup;
++
++ percpu_ref_exit(&css->refcnt);
++
++ if (ss) {
++ /* css free path */
++ int id = css->id;
++
++ if (css->parent)
++ css_put(css->parent);
++
++ ss->css_free(css);
++ cgroup_idr_remove(&ss->css_idr, id);
++ cgroup_put(cgrp);
++ } else {
++ /* cgroup free path */
++ atomic_dec(&cgrp->root->nr_cgrps);
++ cgroup_pidlist_destroy_all(cgrp);
++ cancel_work_sync(&cgrp->release_agent_work);
++
++ if (cgroup_parent(cgrp)) {
++ /*
++ * We get a ref to the parent, and put the ref when
++ * this cgroup is being freed, so it's guaranteed
++ * that the parent won't be destroyed before its
++ * children.
++ */
++ cgroup_put(cgroup_parent(cgrp));
++ kernfs_put(cgrp->kn);
++ kfree(cgrp);
++ } else {
++ /*
++ * This is root cgroup's refcnt reaching zero,
++ * which indicates that the root should be
++ * released.
++ */
++ cgroup_destroy_root(cgrp->root);
++ }
++ }
++}
++
++static void css_free_rcu_fn(struct rcu_head *rcu_head)
++{
++ struct cgroup_subsys_state *css =
++ container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
++
++ INIT_WORK(&css->destroy_work, css_free_work_fn);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
++}
++
++static void css_release_work_fn(struct work_struct *work)
++{
++ struct cgroup_subsys_state *css =
++ container_of(work, struct cgroup_subsys_state, destroy_work);
++ struct cgroup_subsys *ss = css->ss;
++ struct cgroup *cgrp = css->cgroup;
++
++ mutex_lock(&cgroup_mutex);
++
++ css->flags |= CSS_RELEASED;
++ list_del_rcu(&css->sibling);
++
++ if (ss) {
++ /* css release path */
++ cgroup_idr_replace(&ss->css_idr, NULL, css->id);
++ if (ss->css_released)
++ ss->css_released(css);
++ } else {
++ /* cgroup release path */
++ cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
++ cgrp->id = -1;
++
++ /*
++ * There are two control paths which try to determine
++ * cgroup from dentry without going through kernfs -
++ * cgroupstats_build() and css_tryget_online_from_dir().
++ * Those are supported by RCU protecting clearing of
++ * cgrp->kn->priv backpointer.
++ */
++ RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
++ }
++
++ mutex_unlock(&cgroup_mutex);
++
++ call_rcu(&css->rcu_head, css_free_rcu_fn);
++}
++
++static void css_release(struct percpu_ref *ref)
++{
++ struct cgroup_subsys_state *css =
++ container_of(ref, struct cgroup_subsys_state, refcnt);
++
++ INIT_WORK(&css->destroy_work, css_release_work_fn);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
++}
++
++static void init_and_link_css(struct cgroup_subsys_state *css,
++ struct cgroup_subsys *ss, struct cgroup *cgrp)
++{
++ lockdep_assert_held(&cgroup_mutex);
++
++ cgroup_get(cgrp);
++
++ memset(css, 0, sizeof(*css));
++ css->cgroup = cgrp;
++ css->ss = ss;
++ INIT_LIST_HEAD(&css->sibling);
++ INIT_LIST_HEAD(&css->children);
++ css->serial_nr = css_serial_nr_next++;
++
++ if (cgroup_parent(cgrp)) {
++ css->parent = cgroup_css(cgroup_parent(cgrp), ss);
++ css_get(css->parent);
++ }
++
++ BUG_ON(cgroup_css(cgrp, ss));
++}
++
++/* invoke ->css_online() on a new CSS and mark it online if successful */
++static int online_css(struct cgroup_subsys_state *css)
++{
++ struct cgroup_subsys *ss = css->ss;
++ int ret = 0;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ if (ss->css_online)
++ ret = ss->css_online(css);
++ if (!ret) {
++ css->flags |= CSS_ONLINE;
++ rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
++ }
++ return ret;
++}
++
++/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
++static void offline_css(struct cgroup_subsys_state *css)
++{
++ struct cgroup_subsys *ss = css->ss;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ if (!(css->flags & CSS_ONLINE))
++ return;
++
++ if (ss->css_offline)
++ ss->css_offline(css);
++
++ css->flags &= ~CSS_ONLINE;
++ RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
++
++ wake_up_all(&css->cgroup->offline_waitq);
++}
++
++/**
++ * create_css - create a cgroup_subsys_state
++ * @cgrp: the cgroup new css will be associated with
++ * @ss: the subsys of new css
++ * @visible: whether to create control knobs for the new css or not
++ *
++ * Create a new css associated with @cgrp - @ss pair. On success, the new
++ * css is online and installed in @cgrp with all interface files created if
++ * @visible. Returns 0 on success, -errno on failure.
++ */
++static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
++ bool visible)
++{
++ struct cgroup *parent = cgroup_parent(cgrp);
++ struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
++ struct cgroup_subsys_state *css;
++ int err;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ css = ss->css_alloc(parent_css);
++ if (IS_ERR(css))
++ return PTR_ERR(css);
++
++ init_and_link_css(css, ss, cgrp);
++
++ err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
++ if (err)
++ goto err_free_css;
++
++ err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
++ if (err < 0)
++ goto err_free_percpu_ref;
++ css->id = err;
++
++ if (visible) {
++ err = cgroup_populate_dir(cgrp, 1 << ss->id);
++ if (err)
++ goto err_free_id;
++ }
++
++ /* @css is ready to be brought online now, make it visible */
++ list_add_tail_rcu(&css->sibling, &parent_css->children);
++ cgroup_idr_replace(&ss->css_idr, css, css->id);
++
++ err = online_css(css);
++ if (err)
++ goto err_list_del;
++
++ if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
++ cgroup_parent(parent)) {
++ pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
++ current->comm, current->pid, ss->name);
++ if (!strcmp(ss->name, "memory"))
++ pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
++ ss->warned_broken_hierarchy = true;
++ }
++
++ return 0;
++
++err_list_del:
++ list_del_rcu(&css->sibling);
++ cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
++err_free_id:
++ cgroup_idr_remove(&ss->css_idr, css->id);
++err_free_percpu_ref:
++ percpu_ref_exit(&css->refcnt);
++err_free_css:
++ call_rcu(&css->rcu_head, css_free_rcu_fn);
++ return err;
++}
++
++static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
++ umode_t mode)
++{
++ struct cgroup *parent, *cgrp;
++ struct cgroup_root *root;
++ struct cgroup_subsys *ss;
++ struct kernfs_node *kn;
++ struct cftype *base_files;
++ int ssid, ret;
++
++ /*
++  * Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable.
++  */
++ if (strchr(name, '\n'))
++ return -EINVAL;
++
++ parent = cgroup_kn_lock_live(parent_kn);
++ if (!parent)
++ return -ENODEV;
++ root = parent->root;
++
++ /* allocate the cgroup and its ID, 0 is reserved for the root */
++ cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
++ if (!cgrp) {
++ ret = -ENOMEM;
++ goto out_unlock;
++ }
++
++ ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
++ if (ret)
++ goto out_free_cgrp;
++
++ /*
++ * Temporarily set the pointer to NULL, so idr_find() won't return
++ * a half-baked cgroup.
++ */
++ cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
++ if (cgrp->id < 0) {
++ ret = -ENOMEM;
++ goto out_cancel_ref;
++ }
++
++ init_cgroup_housekeeping(cgrp);
++
++ cgrp->self.parent = &parent->self;
++ cgrp->root = root;
++
++ if (notify_on_release(parent))
++ set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
++
++ if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
++ set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
++
++ /* create the directory */
++ kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
++ if (IS_ERR(kn)) {
++ ret = PTR_ERR(kn);
++ goto out_free_id;
++ }
++ cgrp->kn = kn;
++
++ /*
++ * This extra ref will be put in cgroup_free_fn() and guarantees
++ * that @cgrp->kn is always accessible.
++ */
++ kernfs_get(kn);
++
++ cgrp->self.serial_nr = css_serial_nr_next++;
++
++ /* allocation complete, commit to creation */
++ list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
++ atomic_inc(&root->nr_cgrps);
++ cgroup_get(parent);
++
++ /*
++ * @cgrp is now fully operational. If something fails after this
++ * point, it'll be released via the normal destruction path.
++ */
++ cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
++
++ ret = cgroup_kn_set_ugid(kn);
++ if (ret)
++ goto out_destroy;
++
++ if (cgroup_on_dfl(cgrp))
++ base_files = cgroup_dfl_base_files;
++ else
++ base_files = cgroup_legacy_base_files;
++
++ ret = cgroup_addrm_files(cgrp, base_files, true);
++ if (ret)
++ goto out_destroy;
++
++ /* let's create and online css's */
++ for_each_subsys(ss, ssid) {
++ if (parent->child_subsys_mask & (1 << ssid)) {
++ ret = create_css(cgrp, ss,
++ parent->subtree_control & (1 << ssid));
++ if (ret)
++ goto out_destroy;
++ }
++ }
++
++ /*
++ * On the default hierarchy, a child doesn't automatically inherit
++ * subtree_control from the parent. Each is configured manually.
++ */
++ if (!cgroup_on_dfl(cgrp)) {
++ cgrp->subtree_control = parent->subtree_control;
++ cgroup_refresh_child_subsys_mask(cgrp);
++ }
++
++ kernfs_activate(kn);
++
++ ret = 0;
++ goto out_unlock;
++
++out_free_id:
++ cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
++out_cancel_ref:
++ percpu_ref_exit(&cgrp->self.refcnt);
++out_free_cgrp:
++ kfree(cgrp);
++out_unlock:
++ cgroup_kn_unlock(parent_kn);
++ return ret;
++
++out_destroy:
++ cgroup_destroy_locked(cgrp);
++ goto out_unlock;
++}
++
++/*
++ * This is called when the refcnt of a css is confirmed to be killed.
++ * css_tryget_online() is now guaranteed to fail. Tell the subsystem to
++ * initiate destruction and put the css ref from kill_css().
++ */
++static void css_killed_work_fn(struct work_struct *work)
++{
++ struct cgroup_subsys_state *css =
++ container_of(work, struct cgroup_subsys_state, destroy_work);
++
++ mutex_lock(&cgroup_mutex);
++ offline_css(css);
++ mutex_unlock(&cgroup_mutex);
++
++ css_put(css);
++}
++
++/* css kill confirmation processing requires process context, bounce */
++static void css_killed_ref_fn(struct percpu_ref *ref)
++{
++ struct cgroup_subsys_state *css =
++ container_of(ref, struct cgroup_subsys_state, refcnt);
++
++ INIT_WORK(&css->destroy_work, css_killed_work_fn);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
++}
++
++/**
++ * kill_css - destroy a css
++ * @css: css to destroy
++ *
++ * This function initiates destruction of @css by removing cgroup interface
++ * files and putting its base reference. ->css_offline() will be invoked
++ * asynchronously once css_tryget_online() is guaranteed to fail and when
++ * the reference count reaches zero, @css will be released.
++ */
++static void kill_css(struct cgroup_subsys_state *css)
++{
++ lockdep_assert_held(&cgroup_mutex);
++
++ /*
++ * This must happen before css is disassociated with its cgroup.
++ * See seq_css() for details.
++ */
++ cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
++
++ /*
++ * Killing would put the base ref, but we need to keep it alive
++ * until after ->css_offline().
++ */
++ css_get(css);
++
++ /*
++ * cgroup core guarantees that, by the time ->css_offline() is
++ * invoked, no new css reference will be given out via
++ * css_tryget_online(). We can't simply call percpu_ref_kill() and
++ * proceed to offlining css's because percpu_ref_kill() doesn't
++ * guarantee that the ref is seen as killed on all CPUs on return.
++ *
++ * Use percpu_ref_kill_and_confirm() to get notifications as each
++ * css is confirmed to be seen as killed on all CPUs.
++ */
++ percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
++}
++
++/**
++ * cgroup_destroy_locked - the first stage of cgroup destruction
++ * @cgrp: cgroup to be destroyed
++ *
++ * css's make use of percpu refcnts whose killing latency shouldn't be
++ * exposed to userland and are RCU protected. Also, cgroup core needs to
++ * guarantee that css_tryget_online() won't succeed by the time
++ * ->css_offline() is invoked. To satisfy all the requirements,
++ * destruction is implemented in the following two steps.
++ *
++ * s1. Verify @cgrp can be destroyed and mark it dying. Remove all
++ * userland visible parts and start killing the percpu refcnts of
++ * css's. Set up so that the next stage will be kicked off once all
++ * the percpu refcnts are confirmed to be killed.
++ *
++ * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
++ * rest of destruction. Once all cgroup references are gone, the
++ * cgroup is RCU-freed.
++ *
++ * This function implements s1. After this step, @cgrp is gone as far as
++ * the userland is concerned and a new cgroup with the same name may be
++ * created. As cgroup doesn't care about the names internally, this
++ * doesn't cause any problem.
++ */
++static int cgroup_destroy_locked(struct cgroup *cgrp)
++ __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
++{
++ struct cgroup_subsys_state *css;
++ bool empty;
++ int ssid;
++
++ lockdep_assert_held(&cgroup_mutex);
++
++ /*
++ * css_set_rwsem synchronizes access to ->cset_links and prevents
++ * @cgrp from being removed while put_css_set() is in progress.
++ */
++ down_read(&css_set_rwsem);
++ empty = list_empty(&cgrp->cset_links);
++ up_read(&css_set_rwsem);
++ if (!empty)
++ return -EBUSY;
++
++ /*
++ * Make sure there are no live children. We can't test emptiness of
++ * ->self.children as dead children linger on it while being
++ * drained; otherwise, "rmdir parent/child parent" may fail.
++ */
++ if (css_has_online_children(&cgrp->self))
++ return -EBUSY;
++
++ /*
++ * Mark @cgrp dead. This prevents further task migration and child
++ * creation by disabling cgroup_lock_live_group().
++ */
++ cgrp->self.flags &= ~CSS_ONLINE;
++
++ /* initiate massacre of all css's */
++ for_each_css(css, ssid, cgrp)
++ kill_css(css);
++
++ /*
++ * Remove @cgrp directory along with the base files. @cgrp has an
++ * extra ref on its kn.
++ */
++ kernfs_remove(cgrp->kn);
++
++ check_for_release(cgroup_parent(cgrp));
++
++ /* put the base reference */
++ percpu_ref_kill(&cgrp->self.refcnt);
++
++ return 0;
++};
++
++static int cgroup_rmdir(struct kernfs_node *kn)
++{
++ struct cgroup *cgrp;
++ int ret = 0;
++
++ cgrp = cgroup_kn_lock_live(kn);
++ if (!cgrp)
++ return 0;
++
++ ret = cgroup_destroy_locked(cgrp);
++
++ cgroup_kn_unlock(kn);
++ return ret;
++}
++
++static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
++ .remount_fs = cgroup_remount,
++ .show_options = cgroup_show_options,
++ .mkdir = cgroup_mkdir,
++ .rmdir = cgroup_rmdir,
++ .rename = cgroup_rename,
++};
++
++static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
++{
++ struct cgroup_subsys_state *css;
++
++ printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
++
++ mutex_lock(&cgroup_mutex);
++
++ idr_init(&ss->css_idr);
++ INIT_LIST_HEAD(&ss->cfts);
++
++ /* Create the root cgroup state for this subsystem */
++ ss->root = &cgrp_dfl_root;
++ css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
++ /* We don't handle early failures gracefully */
++ BUG_ON(IS_ERR(css));
++ init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
++
++ /*
++ * Root csses are never destroyed and we can't initialize
++ * percpu_ref during early init. Disable refcnting.
++ */
++ css->flags |= CSS_NO_REF;
++
++ if (early) {
++ /* allocation can't be done safely during early init */
++ css->id = 1;
++ } else {
++ css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
++ BUG_ON(css->id < 0);
++ }
++
++ /* Update the init_css_set to contain a subsys
++ * pointer to this state - since the subsystem is
++ * newly registered, all tasks and hence the
++ * init_css_set is in the subsystem's root cgroup. */
++ init_css_set.subsys[ss->id] = css;
++
++ need_forkexit_callback |= ss->fork || ss->exit;
++
++ /* At system boot, before all subsystems have been
++ * registered, no tasks have been forked, so we don't
++ * need to invoke fork callbacks here. */
++ BUG_ON(!list_empty(&init_task.tasks));
++
++ BUG_ON(online_css(css));
++
++ mutex_unlock(&cgroup_mutex);
++}
++
++/**
++ * cgroup_init_early - cgroup initialization at system boot
++ *
++ * Initialize cgroups at system boot, and initialize any
++ * subsystems that request early init.
++ */
++int __init cgroup_init_early(void)
++{
++ static struct cgroup_sb_opts __initdata opts;
++ struct cgroup_subsys *ss;
++ int i;
++
++ init_cgroup_root(&cgrp_dfl_root, &opts);
++ cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
++
++ RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
++
++ for_each_subsys(ss, i) {
++ WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
++ "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
++ i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
++ ss->id, ss->name);
++ WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
++ "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
++
++ ss->id = i;
++ ss->name = cgroup_subsys_name[i];
++
++ if (ss->early_init)
++ cgroup_init_subsys(ss, true);
++ }
++ return 0;
++}
++
++/**
++ * cgroup_init - cgroup initialization
++ *
++ * Register cgroup filesystem and /proc file, and initialize
++ * any subsystems that didn't request early init.
++ */
++int __init cgroup_init(void)
++{
++ struct cgroup_subsys *ss;
++ unsigned long key;
++ int ssid, err;
++
++ BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
++ BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
++
++ mutex_lock(&cgroup_mutex);
++
++ /* Add init_css_set to the hash table */
++ key = css_set_hash(init_css_set.subsys);
++ hash_add(css_set_table, &init_css_set.hlist, key);
++
++ BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
++
++ mutex_unlock(&cgroup_mutex);
++
++ for_each_subsys(ss, ssid) {
++ if (ss->early_init) {
++ struct cgroup_subsys_state *css =
++ init_css_set.subsys[ss->id];
++
++ css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
++ GFP_KERNEL);
++ BUG_ON(css->id < 0);
++ } else {
++ cgroup_init_subsys(ss, false);
++ }
++
++ list_add_tail(&init_css_set.e_cset_node[ssid],
++ &cgrp_dfl_root.cgrp.e_csets[ssid]);
++
++ /*
++ * Setting dfl_root subsys_mask needs to consider the
++ * disabled flag and cftype registration needs kmalloc,
++ * both of which aren't available during early_init.
++ */
++ if (ss->disabled)
++ continue;
++
++ cgrp_dfl_root.subsys_mask |= 1 << ss->id;
++
++ if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
++ ss->dfl_cftypes = ss->legacy_cftypes;
++
++ if (!ss->dfl_cftypes)
++ cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
++
++ if (ss->dfl_cftypes == ss->legacy_cftypes) {
++ WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
++ } else {
++ WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
++ WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
++ }
++
++ if (ss->bind)
++ ss->bind(init_css_set.subsys[ssid]);
++ }
++
++ err = sysfs_create_mount_point(fs_kobj, "cgroup");
++ if (err)
++ return err;
++
++ err = register_filesystem(&cgroup_fs_type);
++ if (err < 0) {
++ sysfs_remove_mount_point(fs_kobj, "cgroup");
++ return err;
++ }
++
++ proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
++ return 0;
++}
++
++static int __init cgroup_wq_init(void)
++{
++ /*
++ * There isn't much point in executing destruction path in
++ * parallel. Good chunk is serialized with cgroup_mutex anyway.
++ * Use 1 for @max_active.
++ *
++ * We would prefer to do this in cgroup_init() above, but that
++ * is called before init_workqueues(): so leave this until after.
++ */
++ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
++ BUG_ON(!cgroup_destroy_wq);
++
++ /*
++ * Used to destroy pidlists and separate to serve as flush domain.
++ * Cap @max_active to 1 too.
++ */
++ cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
++ 0, 1);
++ BUG_ON(!cgroup_pidlist_destroy_wq);
++
++ return 0;
++}
++core_initcall(cgroup_wq_init);
++
++/*
++ * proc_cgroup_show()
++ * - Print task's cgroup paths into seq_file, one line for each hierarchy
++ * - Used for /proc/<pid>/cgroup.
++ */
++int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
++ struct pid *pid, struct task_struct *tsk)
++{
++ char *buf, *path;
++ int retval;
++ struct cgroup_root *root;
++
++ retval = -ENOMEM;
++ buf = kmalloc(PATH_MAX, GFP_KERNEL);
++ if (!buf)
++ goto out;
++
++ mutex_lock(&cgroup_mutex);
++ down_read(&css_set_rwsem);
++
++ for_each_root(root) {
++ struct cgroup_subsys *ss;
++ struct cgroup *cgrp;
++ int ssid, count = 0;
++
++ if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible)
++ continue;
++
++ seq_printf(m, "%d:", root->hierarchy_id);
++ for_each_subsys(ss, ssid)
++ if (root->subsys_mask & (1 << ssid))
++ seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
++ if (strlen(root->name))
++ seq_printf(m, "%sname=%s", count ? "," : "",
++ root->name);
++ seq_putc(m, ':');
++ cgrp = task_cgroup_from_root(tsk, root);
++ path = cgroup_path(cgrp, buf, PATH_MAX);
++ if (!path) {
++ retval = -ENAMETOOLONG;
++ goto out_unlock;
++ }
++ seq_puts(m, path);
++ seq_putc(m, '\n');
++ }
++
++ retval = 0;
++out_unlock:
++ up_read(&css_set_rwsem);
++ mutex_unlock(&cgroup_mutex);
++ kfree(buf);
++out:
++ return retval;
++}
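++
++/*
++ * Output sketch (illustrative only): each hierarchy contributes one
++ * "id:controllers:path" line, so /proc/<pid>/cgroup may look roughly like
++ *
++ *	4:cpu,cpuacct:/user.slice
++ *	1:name=systemd:/user.slice/user-1000.slice
++ *
++ * where the ids, controller sets and paths depend on the local setup.
++ */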
++
++/* Display information about each subsystem and each hierarchy */
++static int proc_cgroupstats_show(struct seq_file *m, void *v)
++{
++ struct cgroup_subsys *ss;
++ int i;
++
++ seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
++ /*
++ * ideally we don't want subsystems moving around while we do this.
++ * cgroup_mutex is also necessary to guarantee an atomic snapshot of
++ * subsys/hierarchy state.
++ */
++ mutex_lock(&cgroup_mutex);
++
++ for_each_subsys(ss, i)
++ seq_printf(m, "%s\t%d\t%d\t%d\n",
++ ss->name, ss->root->hierarchy_id,
++ atomic_read(&ss->root->nr_cgrps), !ss->disabled);
++
++ mutex_unlock(&cgroup_mutex);
++ return 0;
++}
++
++static int cgroupstats_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, proc_cgroupstats_show, NULL);
++}
++
++static const struct file_operations proc_cgroupstats_operations = {
++ .open = cgroupstats_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++/**
++ * cgroup_fork - initialize cgroup related fields during copy_process()
++ * @child: pointer to task_struct of the forking process's child.
++ *
++ * A task is associated with the init_css_set until cgroup_post_fork()
++ * attaches it to the parent's css_set. Empty cg_list indicates that
++ * @child isn't holding reference to its css_set.
++ */
++void cgroup_fork(struct task_struct *child)
++{
++ RCU_INIT_POINTER(child->cgroups, &init_css_set);
++ INIT_LIST_HEAD(&child->cg_list);
++}
++
++/**
++ * cgroup_post_fork - called on a new task after adding it to the task list
++ * @child: the task in question
++ *
++ * Adds the task to the list running through its css_set if necessary and
++ * calls the subsystem fork() callbacks. Has to be after the task is
++ * visible on the task list in case we race with the first call to
++ * cgroup_task_iter_start() - to guarantee that the new task ends up on its
++ * list.
++ */
++void cgroup_post_fork(struct task_struct *child)
++{
++ struct cgroup_subsys *ss;
++ int i;
++
++ /*
++ * This may race against cgroup_enable_task_cg_lists(). As that
++ * function sets use_task_css_set_links before grabbing
++ * tasklist_lock and we just went through tasklist_lock to add
++ * @child, it's guaranteed that either we see the set
++ * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
++ * @child during its iteration.
++ *
++ * If we won the race, @child is associated with %current's
++ * css_set. Grabbing css_set_rwsem guarantees both that the
++ * association is stable, and, on completion of the parent's
++ * migration, @child is visible in the source of migration or
++ * already in the destination cgroup. This guarantee is necessary
++ * when implementing operations which need to migrate all tasks of
++ * a cgroup to another.
++ *
++ * Note that if we lose to cgroup_enable_task_cg_lists(), @child
++ * will remain in init_css_set. This is safe because all tasks are
++ * in the init_css_set before cg_links is enabled and there's no
++ * operation which transfers all tasks out of init_css_set.
++ */
++ if (use_task_css_set_links) {
++ struct css_set *cset;
++
++ down_write(&css_set_rwsem);
++ cset = task_css_set(current);
++ if (list_empty(&child->cg_list)) {
++ rcu_assign_pointer(child->cgroups, cset);
++ list_add(&child->cg_list, &cset->tasks);
++ get_css_set(cset);
++ }
++ up_write(&css_set_rwsem);
++ }
++
++ /*
++ * Call ss->fork(). This must happen after @child is linked on
++ * css_set; otherwise, @child might change state between ->fork()
++ * and addition to css_set.
++ */
++ if (need_forkexit_callback) {
++ for_each_subsys(ss, i)
++ if (ss->fork)
++ ss->fork(child);
++ }
++}
++
++/**
++ * cgroup_exit - detach cgroup from exiting task
++ * @tsk: pointer to task_struct of exiting process
++ *
++ * Description: Detach cgroup from @tsk and release it.
++ *
++ * Note that cgroups marked notify_on_release force every task in
++ * them to take the global cgroup_mutex mutex when exiting.
++ * This could impact scaling on very large systems. Be reluctant to
++ * use notify_on_release cgroups where very high task exit scaling
++ * is required on large systems.
++ *
++ * We set the exiting task's cgroup to the root cgroup (top_cgroup). We
++ * call cgroup_exit() while the task is still competent to handle
++ * notify_on_release(), then leave the task attached to the root cgroup in
++ * each hierarchy for the remainder of its exit. No need to bother with
++ * init_css_set refcnting. init_css_set never goes away and we can't race
++ * with migration path - PF_EXITING is visible to migration path.
++ */
++void cgroup_exit(struct task_struct *tsk)
++{
++ struct cgroup_subsys *ss;
++ struct css_set *cset;
++ bool put_cset = false;
++ int i;
++
++ /*
++ * Unlink @tsk from its css_set. As the migration path can't race
++ * with us, we can check cg_list without grabbing css_set_rwsem.
++ */
++ if (!list_empty(&tsk->cg_list)) {
++ down_write(&css_set_rwsem);
++ list_del_init(&tsk->cg_list);
++ up_write(&css_set_rwsem);
++ put_cset = true;
++ }
++
++ /* Reassign the task to the init_css_set. */
++ cset = task_css_set(tsk);
++ RCU_INIT_POINTER(tsk->cgroups, &init_css_set);
++
++ if (need_forkexit_callback) {
++ /* see cgroup_post_fork() for details */
++ for_each_subsys(ss, i) {
++ if (ss->exit) {
++ struct cgroup_subsys_state *old_css = cset->subsys[i];
++ struct cgroup_subsys_state *css = task_css(tsk, i);
++
++ ss->exit(css, old_css, tsk);
++ }
++ }
++ }
++
++ if (put_cset)
++ put_css_set(cset);
++}
++
++static void check_for_release(struct cgroup *cgrp)
++{
++ if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) &&
++ !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
++ schedule_work(&cgrp->release_agent_work);
++}
++
++/*
++ * Notify userspace when a cgroup is released, by running the
++ * configured release agent with the name of the cgroup (path
++ * relative to the root of cgroup file system) as the argument.
++ *
++ * Most likely, this user command will try to rmdir this cgroup.
++ *
++ * This races with the possibility that some other task will be
++ * attached to this cgroup before it is removed, or that some other
++ * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
++ * The presumed 'rmdir' will fail quietly if this cgroup is no longer
++ * unused, and this cgroup will be reprieved from its death sentence,
++ * to continue to serve a useful existence. Next time it's released,
++ * we will get notified again, if it still has 'notify_on_release' set.
++ *
++ * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
++ * means only wait until the task is successfully execve()'d. The
++ * separate release agent task is forked by call_usermodehelper(),
++ * then control in this thread returns here, without waiting for the
++ * release agent task. We don't bother to wait because the caller of
++ * this routine has no use for the exit status of the release agent
++ * task, so no sense holding our caller up for that.
++ */
++static void cgroup_release_agent(struct work_struct *work)
++{
++ struct cgroup *cgrp =
++ container_of(work, struct cgroup, release_agent_work);
++ char *pathbuf = NULL, *agentbuf = NULL, *path;
++ char *argv[3], *envp[3];
++
++ mutex_lock(&cgroup_mutex);
++
++ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
++ agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
++ if (!pathbuf || !agentbuf)
++ goto out;
++
++ path = cgroup_path(cgrp, pathbuf, PATH_MAX);
++ if (!path)
++ goto out;
++
++ argv[0] = agentbuf;
++ argv[1] = path;
++ argv[2] = NULL;
++
++ /* minimal command environment */
++ envp[0] = "HOME=/";
++ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
++ envp[2] = NULL;
++
++ mutex_unlock(&cgroup_mutex);
++ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
++ goto out_free;
++out:
++ mutex_unlock(&cgroup_mutex);
++out_free:
++ kfree(agentbuf);
++ kfree(pathbuf);
++}
++
++static int __init cgroup_disable(char *str)
++{
++ struct cgroup_subsys *ss;
++ char *token;
++ int i;
++
++ while ((token = strsep(&str, ",")) != NULL) {
++ if (!*token)
++ continue;
++
++ for_each_subsys(ss, i) {
++ if (!strcmp(token, ss->name)) {
++ ss->disabled = 1;
++ printk(KERN_INFO "Disabling %s control group"
++ " subsystem\n", ss->name);
++ break;
++ }
++ }
++ }
++ return 1;
++}
++__setup("cgroup_disable=", cgroup_disable);
++
++static int __init cgroup_set_legacy_files_on_dfl(char *str)
++{
++ printk("cgroup: using legacy files on the default hierarchy\n");
++ cgroup_legacy_files_on_dfl = true;
++ return 0;
++}
++__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
++
++/**
++ * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
++ * @dentry: directory dentry of interest
++ * @ss: subsystem of interest
++ *
++ * If @dentry is a directory for a cgroup which has @ss enabled on it, try
++ * to get the corresponding css and return it. If such css doesn't exist
++ * or can't be pinned, an ERR_PTR value is returned.
++ */
++struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
++ struct cgroup_subsys *ss)
++{
++ struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
++ struct cgroup_subsys_state *css = NULL;
++ struct cgroup *cgrp;
++
++ /* is @dentry a cgroup dir? */
++ if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
++ kernfs_type(kn) != KERNFS_DIR)
++ return ERR_PTR(-EBADF);
++
++ rcu_read_lock();
++
++ /*
++ * This path doesn't originate from kernfs and @kn could already
++ * have been or be removed at any point. @kn->priv is RCU
++ * protected for this access. See css_release_work_fn() for details.
++ */
++ cgrp = rcu_dereference(kn->priv);
++ if (cgrp)
++ css = cgroup_css(cgrp, ss);
++
++ if (!css || !css_tryget_online(css))
++ css = ERR_PTR(-ENOENT);
++
++ rcu_read_unlock();
++ return css;
++}
++
++/**
++ * css_from_id - lookup css by id
++ * @id: the cgroup id
++ * @ss: cgroup subsys to be looked into
++ *
++ * Returns the css if there's valid one with @id, otherwise returns NULL.
++ * Should be called under rcu_read_lock().
++ */
++struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
++{
++ WARN_ON_ONCE(!rcu_read_lock_held());
++ return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
++}
++
++#ifdef CONFIG_CGROUP_DEBUG
++static struct cgroup_subsys_state *
++debug_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
++
++ if (!css)
++ return ERR_PTR(-ENOMEM);
++
++ return css;
++}
++
++static void debug_css_free(struct cgroup_subsys_state *css)
++{
++ kfree(css);
++}
++
++static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return cgroup_task_count(css->cgroup);
++}
++
++static u64 current_css_set_read(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return (u64)(unsigned long)current->cgroups;
++}
++
++static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ u64 count;
++
++ rcu_read_lock();
++ count = atomic_read(&task_css_set(current)->refcount);
++ rcu_read_unlock();
++ return count;
++}
++
++static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
++{
++ struct cgrp_cset_link *link;
++ struct css_set *cset;
++ char *name_buf;
++
++ name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
++ if (!name_buf)
++ return -ENOMEM;
++
++ down_read(&css_set_rwsem);
++ rcu_read_lock();
++ cset = rcu_dereference(current->cgroups);
++ list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
++ struct cgroup *c = link->cgrp;
++
++ cgroup_name(c, name_buf, NAME_MAX + 1);
++ seq_printf(seq, "Root %d group %s\n",
++ c->root->hierarchy_id, name_buf);
++ }
++ rcu_read_unlock();
++ up_read(&css_set_rwsem);
++ kfree(name_buf);
++ return 0;
++}
++
++#define MAX_TASKS_SHOWN_PER_CSS 25
++static int cgroup_css_links_read(struct seq_file *seq, void *v)
++{
++ struct cgroup_subsys_state *css = seq_css(seq);
++ struct cgrp_cset_link *link;
++
++ down_read(&css_set_rwsem);
++ list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
++ struct css_set *cset = link->cset;
++ struct task_struct *task;
++ int count = 0;
++
++ seq_printf(seq, "css_set %p\n", cset);
++
++ list_for_each_entry(task, &cset->tasks, cg_list) {
++ if (count++ > MAX_TASKS_SHOWN_PER_CSS)
++ goto overflow;
++ seq_printf(seq, " task %d\n", task_pid_vnr(task));
++ }
++
++ list_for_each_entry(task, &cset->mg_tasks, cg_list) {
++ if (count++ > MAX_TASKS_SHOWN_PER_CSS)
++ goto overflow;
++ seq_printf(seq, " task %d\n", task_pid_vnr(task));
++ }
++ continue;
++ overflow:
++ seq_puts(seq, " ...\n");
++ }
++ up_read(&css_set_rwsem);
++ return 0;
++}
++
++static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
++{
++ return (!cgroup_has_tasks(css->cgroup) &&
++ !css_has_online_children(&css->cgroup->self));
++}
++
++static struct cftype debug_files[] = {
++ {
++ .name = "taskcount",
++ .read_u64 = debug_taskcount_read,
++ },
++
++ {
++ .name = "current_css_set",
++ .read_u64 = current_css_set_read,
++ },
++
++ {
++ .name = "current_css_set_refcount",
++ .read_u64 = current_css_set_refcount_read,
++ },
++
++ {
++ .name = "current_css_set_cg_links",
++ .seq_show = current_css_set_cg_links_read,
++ },
++
++ {
++ .name = "cgroup_css_links",
++ .seq_show = cgroup_css_links_read,
++ },
++
++ {
++ .name = "releasable",
++ .read_u64 = releasable_read,
++ },
++
++ { } /* terminate */
++};
++
++struct cgroup_subsys debug_cgrp_subsys = {
++ .css_alloc = debug_css_alloc,
++ .css_free = debug_css_free,
++ .legacy_cftypes = debug_files,
++};
++#endif /* CONFIG_CGROUP_DEBUG */
+diff -Nur linux-4.1.10.orig/kernel/cpu.c linux-4.1.10/kernel/cpu.c
+--- linux-4.1.10.orig/kernel/cpu.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/cpu.c 2015-10-07 18:00:08.000000000 +0200
@@ -74,8 +74,8 @@
#endif
} cpu_hotplug = {
@@ -13601,9 +29214,9 @@ diff -Nur linux-4.1.6.orig/kernel/cpu.c linux-4.1.6/kernel/cpu.c
return err;
}
-diff -Nur linux-4.1.6.orig/kernel/debug/kdb/kdb_io.c linux-4.1.6/kernel/debug/kdb/kdb_io.c
---- linux-4.1.6.orig/kernel/debug/kdb/kdb_io.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/debug/kdb/kdb_io.c 2015-09-08 23:49:08.089869160 +0200
+diff -Nur linux-4.1.10.orig/kernel/debug/kdb/kdb_io.c linux-4.1.10/kernel/debug/kdb/kdb_io.c
+--- linux-4.1.10.orig/kernel/debug/kdb/kdb_io.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/debug/kdb/kdb_io.c 2015-10-07 18:00:08.000000000 +0200
@@ -554,7 +554,6 @@
int linecount;
int colcount;
@@ -13641,10 +29254,10 @@ diff -Nur linux-4.1.6.orig/kernel/debug/kdb/kdb_io.c linux-4.1.6/kernel/debug/kd
return r;
}
-diff -Nur linux-4.1.6.orig/kernel/events/core.c linux-4.1.6/kernel/events/core.c
---- linux-4.1.6.orig/kernel/events/core.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/events/core.c 2015-09-08 23:49:08.089869160 +0200
-@@ -6890,6 +6890,7 @@
+diff -Nur linux-4.1.10.orig/kernel/events/core.c linux-4.1.10/kernel/events/core.c
+--- linux-4.1.10.orig/kernel/events/core.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/events/core.c 2015-10-07 18:00:08.000000000 +0200
+@@ -6933,6 +6933,7 @@
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
@@ -13652,9 +29265,9 @@ diff -Nur linux-4.1.6.orig/kernel/events/core.c linux-4.1.6/kernel/events/core.c
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
-diff -Nur linux-4.1.6.orig/kernel/exit.c linux-4.1.6/kernel/exit.c
---- linux-4.1.6.orig/kernel/exit.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/exit.c 2015-09-08 23:49:08.089869160 +0200
+diff -Nur linux-4.1.10.orig/kernel/exit.c linux-4.1.10/kernel/exit.c
+--- linux-4.1.10.orig/kernel/exit.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/exit.c 2015-10-07 18:00:08.000000000 +0200
@@ -144,7 +144,7 @@
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
@@ -13664,9 +29277,9 @@ diff -Nur linux-4.1.6.orig/kernel/exit.c linux-4.1.6/kernel/exit.c
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
-diff -Nur linux-4.1.6.orig/kernel/fork.c linux-4.1.6/kernel/fork.c
---- linux-4.1.6.orig/kernel/fork.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/fork.c 2015-09-08 23:49:08.089869160 +0200
+diff -Nur linux-4.1.10.orig/kernel/fork.c linux-4.1.10/kernel/fork.c
+--- linux-4.1.10.orig/kernel/fork.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/fork.c 2015-10-07 18:00:08.000000000 +0200
@@ -108,7 +108,7 @@
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
@@ -13764,9 +29377,9 @@ diff -Nur linux-4.1.6.orig/kernel/fork.c linux-4.1.6/kernel/fork.c
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
-diff -Nur linux-4.1.6.orig/kernel/futex.c linux-4.1.6/kernel/futex.c
---- linux-4.1.6.orig/kernel/futex.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/futex.c 2015-09-08 23:49:08.089869160 +0200
+diff -Nur linux-4.1.10.orig/kernel/futex.c linux-4.1.10/kernel/futex.c
+--- linux-4.1.10.orig/kernel/futex.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/futex.c 2015-10-07 18:00:08.000000000 +0200
@@ -738,7 +738,9 @@
* task still owns the PI-state:
*/
@@ -14095,9 +29708,9 @@ diff -Nur linux-4.1.6.orig/kernel/futex.c linux-4.1.6/kernel/futex.c
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
-diff -Nur linux-4.1.6.orig/kernel/irq/handle.c linux-4.1.6/kernel/irq/handle.c
---- linux-4.1.6.orig/kernel/irq/handle.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/irq/handle.c 2015-09-08 23:49:08.093868717 +0200
+diff -Nur linux-4.1.10.orig/kernel/irq/handle.c linux-4.1.10/kernel/irq/handle.c
+--- linux-4.1.10.orig/kernel/irq/handle.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/irq/handle.c 2015-10-07 18:00:08.000000000 +0200
@@ -133,6 +133,8 @@
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
@@ -14120,9 +29733,9 @@ diff -Nur linux-4.1.6.orig/kernel/irq/handle.c linux-4.1.6/kernel/irq/handle.c
if (!noirqdebug)
note_interrupt(irq, desc, retval);
-diff -Nur linux-4.1.6.orig/kernel/irq/manage.c linux-4.1.6/kernel/irq/manage.c
---- linux-4.1.6.orig/kernel/irq/manage.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/irq/manage.c 2015-09-08 23:49:08.093868717 +0200
+diff -Nur linux-4.1.10.orig/kernel/irq/manage.c linux-4.1.10/kernel/irq/manage.c
+--- linux-4.1.10.orig/kernel/irq/manage.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/irq/manage.c 2015-10-07 18:00:08.000000000 +0200
@@ -22,6 +22,7 @@
#include "internals.h"
@@ -14255,7 +29868,30 @@ diff -Nur linux-4.1.6.orig/kernel/irq/manage.c linux-4.1.6/kernel/irq/manage.c
}
raw_spin_lock_irqsave(&desc->lock, flags);
-@@ -825,7 +900,15 @@
+@@ -697,6 +772,12 @@
+ return IRQ_NONE;
+ }
+
++static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
++{
++ WARN(1, "Secondary action handler called for irq %d\n", irq);
++ return IRQ_NONE;
++}
++
+ static int irq_wait_for_interrupt(struct irqaction *action)
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+@@ -723,7 +804,8 @@
+ static void irq_finalize_oneshot(struct irq_desc *desc,
+ struct irqaction *action)
+ {
+- if (!(desc->istate & IRQS_ONESHOT))
++ if (!(desc->istate & IRQS_ONESHOT) ||
++ action->handler == irq_forced_secondary_handler)
+ return;
+ again:
+ chip_bus_lock(desc);
+@@ -825,7 +907,15 @@
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -14272,9 +29908,31 @@ diff -Nur linux-4.1.6.orig/kernel/irq/manage.c linux-4.1.6/kernel/irq/manage.c
return ret;
}
-@@ -908,6 +991,12 @@
+@@ -877,6 +967,18 @@
+ irq_finalize_oneshot(desc, action);
+ }
+
++static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
++{
++ struct irqaction *secondary = action->secondary;
++
++ if (WARN_ON_ONCE(!secondary))
++ return;
++
++ raw_spin_lock_irq(&desc->lock);
++ __irq_wake_thread(desc, secondary);
++ raw_spin_unlock_irq(&desc->lock);
++}
++
+ /*
+ * Interrupt handler thread
+ */
+@@ -907,7 +1009,15 @@
+ action_ret = handler_fn(desc, action);
if (action_ret == IRQ_HANDLED)
atomic_inc(&desc->threads_handled);
++ if (action_ret == IRQ_WAKE_THREAD)
++ irq_wake_secondary(desc, action);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
@@ -14285,7 +29943,168 @@ diff -Nur linux-4.1.6.orig/kernel/irq/manage.c linux-4.1.6/kernel/irq/manage.c
wake_threads_waitq(desc);
}
-@@ -1221,6 +1310,9 @@
+@@ -951,20 +1061,36 @@
+ }
+ EXPORT_SYMBOL_GPL(irq_wake_thread);
+
+-static void irq_setup_forced_threading(struct irqaction *new)
++static int irq_setup_forced_threading(struct irqaction *new)
+ {
+ if (!force_irqthreads)
+- return;
++ return 0;
+ if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+- return;
++ return 0;
+
+ new->flags |= IRQF_ONESHOT;
+
+- if (!new->thread_fn) {
+- set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+- new->thread_fn = new->handler;
+- new->handler = irq_default_primary_handler;
+- }
++ /*
++ * Handle the case where we have a real primary handler and a
++ * thread handler. We force thread them as well by creating a
++ * secondary action.
++ */
++ if (new->handler != irq_default_primary_handler && new->thread_fn) {
++ /* Allocate the secondary action */
++ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
++ if (!new->secondary)
++ return -ENOMEM;
++ new->secondary->handler = irq_forced_secondary_handler;
++ new->secondary->thread_fn = new->thread_fn;
++ new->secondary->dev_id = new->dev_id;
++ new->secondary->irq = new->irq;
++ new->secondary->name = new->name;
++ }
++ /* Deal with the primary handler */
++ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
++ new->thread_fn = new->handler;
++ new->handler = irq_default_primary_handler;
++ return 0;
+ }
+
+ static int irq_request_resources(struct irq_desc *desc)
+@@ -984,6 +1110,48 @@
+ c->irq_release_resources(d);
+ }
+
++static int
++setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
++{
++ struct task_struct *t;
++ struct sched_param param = {
++ .sched_priority = MAX_USER_RT_PRIO/2,
++ };
++
++ if (!secondary) {
++ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
++ new->name);
++ } else {
++ t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
++ new->name);
++ param.sched_priority += 1;
++ }
++
++ if (IS_ERR(t))
++ return PTR_ERR(t);
++
++ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
++
++ /*
++ * We keep the reference to the task struct even if
++ * the thread dies to avoid that the interrupt code
++ * references an already freed task_struct.
++ */
++ get_task_struct(t);
++ new->thread = t;
++ /*
++ * Tell the thread to set its affinity. This is
++ * important for shared interrupt handlers as we do
++ * not invoke setup_affinity() for the secondary
++ * handlers as everything is already set up. Even for
++ * interrupts marked with IRQF_NO_BALANCE this is
++ * correct as we want the thread to move to the cpu(s)
++ * on which the requesting code placed the interrupt.
++ */
++ set_bit(IRQTF_AFFINITY, &new->thread_flags);
++ return 0;
++}
++
+ /*
+ * Internal function to register an irqaction - typically used to
+ * allocate special interrupts that are part of the architecture.
+@@ -1004,6 +1172,8 @@
+ if (!try_module_get(desc->owner))
+ return -ENODEV;
+
++ new->irq = irq;
++
+ /*
+ * Check whether the interrupt nests into another interrupt
+ * thread.
+@@ -1021,8 +1191,11 @@
+ */
+ new->handler = irq_nested_primary_handler;
+ } else {
+- if (irq_settings_can_thread(desc))
+- irq_setup_forced_threading(new);
++ if (irq_settings_can_thread(desc)) {
++ ret = irq_setup_forced_threading(new);
++ if (ret)
++ goto out_mput;
++ }
+ }
+
+ /*
+@@ -1031,37 +1204,14 @@
+ * thread.
+ */
+ if (new->thread_fn && !nested) {
+- struct task_struct *t;
+- static const struct sched_param param = {
+- .sched_priority = MAX_USER_RT_PRIO/2,
+- };
+-
+- t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+- new->name);
+- if (IS_ERR(t)) {
+- ret = PTR_ERR(t);
++ ret = setup_irq_thread(new, irq, false);
++ if (ret)
+ goto out_mput;
++ if (new->secondary) {
++ ret = setup_irq_thread(new->secondary, irq, true);
++ if (ret)
++ goto out_thread;
+ }
+-
+- sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
+-
+- /*
+- * We keep the reference to the task struct even if
+- * the thread dies to avoid that the interrupt code
+- * references an already freed task_struct.
+- */
+- get_task_struct(t);
+- new->thread = t;
+- /*
+- * Tell the thread to set its affinity. This is
+- * important for shared interrupt handlers as we do
+- * not invoke setup_affinity() for the secondary
+- * handlers as everything is already set up. Even for
+- * interrupts marked with IRQF_NO_BALANCE this is
+- * correct as we want the thread to move to the cpu(s)
+- * on which the requesting code placed the interrupt.
+- */
+- set_bit(IRQTF_AFFINITY, &new->thread_flags);
+ }
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+@@ -1221,6 +1371,9 @@
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
@@ -14295,9 +30114,67 @@ diff -Nur linux-4.1.6.orig/kernel/irq/manage.c linux-4.1.6/kernel/irq/manage.c
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
-diff -Nur linux-4.1.6.orig/kernel/irq/settings.h linux-4.1.6/kernel/irq/settings.h
---- linux-4.1.6.orig/kernel/irq/settings.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/irq/settings.h 2015-09-08 23:49:08.093868717 +0200
+@@ -1234,7 +1387,6 @@
+ irq, nmsk, omsk);
+ }
+
+- new->irq = irq;
+ *old_ptr = new;
+
+ irq_pm_install_action(desc, new);
+@@ -1260,6 +1412,8 @@
+ */
+ if (new->thread)
+ wake_up_process(new->thread);
++ if (new->secondary)
++ wake_up_process(new->secondary->thread);
+
+ register_irq_proc(irq, desc);
+ new->dir = NULL;
+@@ -1290,6 +1444,13 @@
+ kthread_stop(t);
+ put_task_struct(t);
+ }
++ if (new->secondary && new->secondary->thread) {
++ struct task_struct *t = new->secondary->thread;
++
++ new->secondary->thread = NULL;
++ kthread_stop(t);
++ put_task_struct(t);
++ }
+ out_mput:
+ module_put(desc->owner);
+ return ret;
+@@ -1397,9 +1558,14 @@
+ if (action->thread) {
+ kthread_stop(action->thread);
+ put_task_struct(action->thread);
++ if (action->secondary && action->secondary->thread) {
++ kthread_stop(action->secondary->thread);
++ put_task_struct(action->secondary->thread);
++ }
+ }
+
+ module_put(desc->owner);
++ kfree(action->secondary);
+ return action;
+ }
+
+@@ -1543,8 +1709,10 @@
+ retval = __setup_irq(irq, desc, action);
+ chip_bus_sync_unlock(desc);
+
+- if (retval)
++ if (retval) {
++ kfree(action->secondary);
+ kfree(action);
++ }
+
+ #ifdef CONFIG_DEBUG_SHIRQ_FIXME
+ if (!retval && (irqflags & IRQF_SHARED)) {
+diff -Nur linux-4.1.10.orig/kernel/irq/settings.h linux-4.1.10/kernel/irq/settings.h
+--- linux-4.1.10.orig/kernel/irq/settings.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/irq/settings.h 2015-10-07 18:00:08.000000000 +0200
@@ -15,6 +15,7 @@
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
@@ -14331,9 +30208,9 @@ diff -Nur linux-4.1.6.orig/kernel/irq/settings.h linux-4.1.6/kernel/irq/settings
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
-diff -Nur linux-4.1.6.orig/kernel/irq/spurious.c linux-4.1.6/kernel/irq/spurious.c
---- linux-4.1.6.orig/kernel/irq/spurious.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/irq/spurious.c 2015-09-08 23:49:08.093868717 +0200
+diff -Nur linux-4.1.10.orig/kernel/irq/spurious.c linux-4.1.10/kernel/irq/spurious.c
+--- linux-4.1.10.orig/kernel/irq/spurious.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/irq/spurious.c 2015-10-07 18:00:08.000000000 +0200
@@ -444,6 +444,10 @@
static int __init irqfixup_setup(char *str)
@@ -14356,9 +30233,9 @@ diff -Nur linux-4.1.6.orig/kernel/irq/spurious.c linux-4.1.6/kernel/irq/spurious
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
-diff -Nur linux-4.1.6.orig/kernel/irq_work.c linux-4.1.6/kernel/irq_work.c
---- linux-4.1.6.orig/kernel/irq_work.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/irq_work.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/irq_work.c linux-4.1.10/kernel/irq_work.c
+--- linux-4.1.10.orig/kernel/irq_work.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/irq_work.c 2015-10-07 18:00:08.000000000 +0200
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
@@ -14462,9 +30339,9 @@ diff -Nur linux-4.1.6.orig/kernel/irq_work.c linux-4.1.6/kernel/irq_work.c
}
EXPORT_SYMBOL_GPL(irq_work_run);
-diff -Nur linux-4.1.6.orig/kernel/Kconfig.locks linux-4.1.6/kernel/Kconfig.locks
---- linux-4.1.6.orig/kernel/Kconfig.locks 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/Kconfig.locks 2015-09-08 23:49:08.085869602 +0200
+diff -Nur linux-4.1.10.orig/kernel/Kconfig.locks linux-4.1.10/kernel/Kconfig.locks
+--- linux-4.1.10.orig/kernel/Kconfig.locks 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/Kconfig.locks 2015-10-07 18:00:08.000000000 +0200
@@ -225,11 +225,11 @@
config MUTEX_SPIN_ON_OWNER
@@ -14479,9 +30356,9 @@ diff -Nur linux-4.1.6.orig/kernel/Kconfig.locks linux-4.1.6/kernel/Kconfig.locks
config LOCK_SPIN_ON_OWNER
def_bool y
-diff -Nur linux-4.1.6.orig/kernel/Kconfig.preempt linux-4.1.6/kernel/Kconfig.preempt
---- linux-4.1.6.orig/kernel/Kconfig.preempt 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/Kconfig.preempt 2015-09-08 23:49:08.085869602 +0200
+diff -Nur linux-4.1.10.orig/kernel/Kconfig.preempt linux-4.1.10/kernel/Kconfig.preempt
+--- linux-4.1.10.orig/kernel/Kconfig.preempt 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/Kconfig.preempt 2015-10-07 18:00:08.000000000 +0200
@@ -1,3 +1,16 @@
+config PREEMPT
+ bool
@@ -14534,9 +30411,9 @@ diff -Nur linux-4.1.6.orig/kernel/Kconfig.preempt linux-4.1.6/kernel/Kconfig.pre
endchoice
config PREEMPT_COUNT
-diff -Nur linux-4.1.6.orig/kernel/ksysfs.c linux-4.1.6/kernel/ksysfs.c
---- linux-4.1.6.orig/kernel/ksysfs.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/ksysfs.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/ksysfs.c linux-4.1.10/kernel/ksysfs.c
+--- linux-4.1.10.orig/kernel/ksysfs.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/ksysfs.c 2015-10-07 18:00:08.000000000 +0200
@@ -136,6 +136,15 @@
#endif /* CONFIG_KEXEC */
@@ -14563,9 +30440,9 @@ diff -Nur linux-4.1.6.orig/kernel/ksysfs.c linux-4.1.6/kernel/ksysfs.c
NULL
};
-diff -Nur linux-4.1.6.orig/kernel/locking/lglock.c linux-4.1.6/kernel/locking/lglock.c
---- linux-4.1.6.orig/kernel/locking/lglock.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/lglock.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/lglock.c linux-4.1.10/kernel/locking/lglock.c
+--- linux-4.1.10.orig/kernel/locking/lglock.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/lglock.c 2015-10-07 18:00:08.000000000 +0200
@@ -4,6 +4,15 @@
#include <linux/cpu.h>
#include <linux/string.h>
@@ -14710,9 +30587,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/lglock.c linux-4.1.6/kernel/locking/lg
+ }
+}
+#endif
-diff -Nur linux-4.1.6.orig/kernel/locking/lockdep.c linux-4.1.6/kernel/locking/lockdep.c
---- linux-4.1.6.orig/kernel/locking/lockdep.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/lockdep.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/lockdep.c linux-4.1.10/kernel/locking/lockdep.c
+--- linux-4.1.10.orig/kernel/locking/lockdep.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/lockdep.c 2015-10-07 18:00:08.000000000 +0200
@@ -3563,6 +3563,7 @@
}
}
@@ -14729,9 +30606,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/lockdep.c linux-4.1.6/kernel/locking/l
if (!debug_locks)
print_irqtrace_events(current);
-diff -Nur linux-4.1.6.orig/kernel/locking/locktorture.c linux-4.1.6/kernel/locking/locktorture.c
---- linux-4.1.6.orig/kernel/locking/locktorture.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/locktorture.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/locktorture.c linux-4.1.10/kernel/locking/locktorture.c
+--- linux-4.1.10.orig/kernel/locking/locktorture.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/locktorture.c 2015-10-07 18:00:08.000000000 +0200
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/kthread.h>
@@ -14740,9 +30617,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/locktorture.c linux-4.1.6/kernel/locki
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
-diff -Nur linux-4.1.6.orig/kernel/locking/Makefile linux-4.1.6/kernel/locking/Makefile
---- linux-4.1.6.orig/kernel/locking/Makefile 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/Makefile 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/Makefile linux-4.1.10/kernel/locking/Makefile
+--- linux-4.1.10.orig/kernel/locking/Makefile 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/Makefile 2015-10-07 18:00:08.000000000 +0200
@@ -1,5 +1,5 @@
-obj-y += mutex.o semaphore.o rwsem.o
@@ -14774,9 +30651,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/Makefile linux-4.1.6/kernel/locking/Ma
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
-diff -Nur linux-4.1.6.orig/kernel/locking/rt.c linux-4.1.6/kernel/locking/rt.c
---- linux-4.1.6.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/kernel/locking/rt.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/rt.c linux-4.1.10/kernel/locking/rt.c
+--- linux-4.1.10.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/kernel/locking/rt.c 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,461 @@
+/*
+ * kernel/rt.c
@@ -15239,9 +31116,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/rt.c linux-4.1.6/kernel/locking/rt.c
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
-diff -Nur linux-4.1.6.orig/kernel/locking/rtmutex.c linux-4.1.6/kernel/locking/rtmutex.c
---- linux-4.1.6.orig/kernel/locking/rtmutex.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/rtmutex.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/rtmutex.c linux-4.1.10/kernel/locking/rtmutex.c
+--- linux-4.1.10.orig/kernel/locking/rtmutex.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/rtmutex.c 2015-10-07 18:00:08.000000000 +0200
@@ -7,6 +7,11 @@
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
@@ -16330,9 +32207,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/rtmutex.c linux-4.1.6/kernel/locking/r
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+#endif
-diff -Nur linux-4.1.6.orig/kernel/locking/rtmutex_common.h linux-4.1.6/kernel/locking/rtmutex_common.h
---- linux-4.1.6.orig/kernel/locking/rtmutex_common.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/rtmutex_common.h 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/rtmutex_common.h linux-4.1.10/kernel/locking/rtmutex_common.h
+--- linux-4.1.10.orig/kernel/locking/rtmutex_common.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/rtmutex_common.h 2015-10-07 18:00:08.000000000 +0200
@@ -49,6 +49,7 @@
struct rb_node pi_tree_entry;
struct task_struct *task;
@@ -16376,9 +32253,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/rtmutex_common.h linux-4.1.6/kernel/lo
+}
+
#endif
-diff -Nur linux-4.1.6.orig/kernel/locking/spinlock.c linux-4.1.6/kernel/locking/spinlock.c
---- linux-4.1.6.orig/kernel/locking/spinlock.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/spinlock.c 2015-09-08 23:49:08.101867828 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/spinlock.c linux-4.1.10/kernel/locking/spinlock.c
+--- linux-4.1.10.orig/kernel/locking/spinlock.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/spinlock.c 2015-10-07 18:00:08.000000000 +0200
@@ -124,8 +124,11 @@
* __[spin|read|write]_lock_bh()
*/
@@ -16409,9 +32286,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/spinlock.c linux-4.1.6/kernel/locking/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
-diff -Nur linux-4.1.6.orig/kernel/locking/spinlock_debug.c linux-4.1.6/kernel/locking/spinlock_debug.c
---- linux-4.1.6.orig/kernel/locking/spinlock_debug.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/locking/spinlock_debug.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/locking/spinlock_debug.c linux-4.1.10/kernel/locking/spinlock_debug.c
+--- linux-4.1.10.orig/kernel/locking/spinlock_debug.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/locking/spinlock_debug.c 2015-10-07 18:00:08.000000000 +0200
@@ -31,6 +31,7 @@
EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -16442,9 +32319,9 @@ diff -Nur linux-4.1.6.orig/kernel/locking/spinlock_debug.c linux-4.1.6/kernel/lo
}
+
+#endif
-diff -Nur linux-4.1.6.orig/kernel/panic.c linux-4.1.6/kernel/panic.c
---- linux-4.1.6.orig/kernel/panic.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/panic.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/panic.c linux-4.1.10/kernel/panic.c
+--- linux-4.1.10.orig/kernel/panic.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/panic.c 2015-10-07 18:00:08.000000000 +0200
@@ -387,9 +387,11 @@
static int init_oops_id(void)
@@ -16457,9 +32334,9 @@ diff -Nur linux-4.1.6.orig/kernel/panic.c linux-4.1.6/kernel/panic.c
oops_id++;
return 0;
-diff -Nur linux-4.1.6.orig/kernel/power/hibernate.c linux-4.1.6/kernel/power/hibernate.c
---- linux-4.1.6.orig/kernel/power/hibernate.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/power/hibernate.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/power/hibernate.c linux-4.1.10/kernel/power/hibernate.c
+--- linux-4.1.10.orig/kernel/power/hibernate.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/power/hibernate.c 2015-10-07 18:00:08.000000000 +0200
@@ -285,6 +285,8 @@
local_irq_disable();
@@ -16509,9 +32386,9 @@ diff -Nur linux-4.1.6.orig/kernel/power/hibernate.c linux-4.1.6/kernel/power/hib
local_irq_enable();
enable_nonboot_cpus();
-diff -Nur linux-4.1.6.orig/kernel/power/suspend.c linux-4.1.6/kernel/power/suspend.c
---- linux-4.1.6.orig/kernel/power/suspend.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/power/suspend.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/power/suspend.c linux-4.1.10/kernel/power/suspend.c
+--- linux-4.1.10.orig/kernel/power/suspend.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/power/suspend.c 2015-10-07 18:00:08.000000000 +0200
@@ -356,6 +356,8 @@
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -16530,9 +32407,9 @@ diff -Nur linux-4.1.6.orig/kernel/power/suspend.c linux-4.1.6/kernel/power/suspe
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
-diff -Nur linux-4.1.6.orig/kernel/printk/printk.c linux-4.1.6/kernel/printk/printk.c
---- linux-4.1.6.orig/kernel/printk/printk.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/printk/printk.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/printk/printk.c linux-4.1.10/kernel/printk/printk.c
+--- linux-4.1.10.orig/kernel/printk/printk.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/printk/printk.c 2015-10-07 18:00:08.000000000 +0200
@@ -1163,6 +1163,7 @@
{
char *text;
@@ -16785,9 +32662,9 @@ diff -Nur linux-4.1.6.orig/kernel/printk/printk.c linux-4.1.6/kernel/printk/prin
}
console_locked = 0;
-diff -Nur linux-4.1.6.orig/kernel/ptrace.c linux-4.1.6/kernel/ptrace.c
---- linux-4.1.6.orig/kernel/ptrace.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/ptrace.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/ptrace.c linux-4.1.10/kernel/ptrace.c
+--- linux-4.1.10.orig/kernel/ptrace.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/ptrace.c 2015-10-07 18:00:08.000000000 +0200
@@ -129,7 +129,12 @@
spin_lock_irq(&task->sighand->siglock);
@@ -16802,9 +32679,9 @@ diff -Nur linux-4.1.6.orig/kernel/ptrace.c linux-4.1.6/kernel/ptrace.c
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
-diff -Nur linux-4.1.6.orig/kernel/rcu/tree.c linux-4.1.6/kernel/rcu/tree.c
---- linux-4.1.6.orig/kernel/rcu/tree.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/rcu/tree.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/rcu/tree.c linux-4.1.10/kernel/rcu/tree.c
+--- linux-4.1.10.orig/kernel/rcu/tree.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/rcu/tree.c 2015-10-07 18:00:08.000000000 +0200
@@ -56,6 +56,11 @@
#include <linux/random.h>
#include <linux/ftrace_event.h>
@@ -17092,9 +32969,9 @@ diff -Nur linux-4.1.6.orig/kernel/rcu/tree.c linux-4.1.6/kernel/rcu/tree.c
/*
* We don't need protection against CPU-hotplug here because
-diff -Nur linux-4.1.6.orig/kernel/rcu/tree.h linux-4.1.6/kernel/rcu/tree.h
---- linux-4.1.6.orig/kernel/rcu/tree.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/rcu/tree.h 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/rcu/tree.h linux-4.1.10/kernel/rcu/tree.h
+--- linux-4.1.10.orig/kernel/rcu/tree.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/rcu/tree.h 2015-10-07 18:00:08.000000000 +0200
@@ -27,6 +27,7 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
@@ -17155,9 +33032,9 @@ diff -Nur linux-4.1.6.orig/kernel/rcu/tree.h linux-4.1.6/kernel/rcu/tree.h
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-diff -Nur linux-4.1.6.orig/kernel/rcu/tree_plugin.h linux-4.1.6/kernel/rcu/tree_plugin.h
---- linux-4.1.6.orig/kernel/rcu/tree_plugin.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/rcu/tree_plugin.h 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/rcu/tree_plugin.h linux-4.1.10/kernel/rcu/tree_plugin.h
+--- linux-4.1.10.orig/kernel/rcu/tree_plugin.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/rcu/tree_plugin.h 2015-10-07 18:00:08.000000000 +0200
@@ -24,27 +24,20 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
@@ -17492,9 +33369,9 @@ diff -Nur linux-4.1.6.orig/kernel/rcu/tree_plugin.h linux-4.1.6/kernel/rcu/tree_
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}
-diff -Nur linux-4.1.6.orig/kernel/rcu/update.c linux-4.1.6/kernel/rcu/update.c
---- linux-4.1.6.orig/kernel/rcu/update.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/rcu/update.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/rcu/update.c linux-4.1.10/kernel/rcu/update.c
+--- linux-4.1.10.orig/kernel/rcu/update.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/rcu/update.c 2015-10-07 18:00:08.000000000 +0200
@@ -227,6 +227,7 @@
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -17511,9 +33388,9 @@ diff -Nur linux-4.1.6.orig/kernel/rcu/update.c linux-4.1.6/kernel/rcu/update.c
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-diff -Nur linux-4.1.6.orig/kernel/relay.c linux-4.1.6/kernel/relay.c
---- linux-4.1.6.orig/kernel/relay.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/relay.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/relay.c linux-4.1.10/kernel/relay.c
+--- linux-4.1.10.orig/kernel/relay.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/relay.c 2015-10-07 18:00:08.000000000 +0200
@@ -339,6 +339,10 @@
{
struct rchan_buf *buf = (struct rchan_buf *)data;
@@ -17549,9 +33426,9 @@ diff -Nur linux-4.1.6.orig/kernel/relay.c linux-4.1.6/kernel/relay.c
}
old = buf->data;
-diff -Nur linux-4.1.6.orig/kernel/sched/completion.c linux-4.1.6/kernel/sched/completion.c
---- linux-4.1.6.orig/kernel/sched/completion.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/completion.c 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/completion.c linux-4.1.10/kernel/sched/completion.c
+--- linux-4.1.10.orig/kernel/sched/completion.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/completion.c 2015-10-07 18:00:08.000000000 +0200
@@ -30,10 +30,10 @@
{
unsigned long flags;
@@ -17642,9 +33519,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/completion.c linux-4.1.6/kernel/sched/co
return true;
}
EXPORT_SYMBOL(completion_done);
-diff -Nur linux-4.1.6.orig/kernel/sched/core.c linux-4.1.6/kernel/sched/core.c
---- linux-4.1.6.orig/kernel/sched/core.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/core.c 2015-09-08 23:49:08.109866942 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/core.c linux-4.1.10/kernel/sched/core.c
+--- linux-4.1.10.orig/kernel/sched/core.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/core.c 2015-10-07 18:00:08.000000000 +0200
@@ -282,7 +282,11 @@
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
@@ -18362,7 +34239,7 @@ diff -Nur linux-4.1.6.orig/kernel/sched/core.c linux-4.1.6/kernel/sched/core.c
break;
#endif
}
-@@ -7266,7 +7610,8 @@
+@@ -7274,7 +7618,8 @@
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
@@ -18372,9 +34249,8402 @@ diff -Nur linux-4.1.6.orig/kernel/sched/core.c linux-4.1.6/kernel/sched/core.c
return (nested == preempt_offset);
}
-diff -Nur linux-4.1.6.orig/kernel/sched/cputime.c linux-4.1.6/kernel/sched/cputime.c
---- linux-4.1.6.orig/kernel/sched/cputime.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/cputime.c 2015-09-08 23:49:08.109866942 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/core.c.orig linux-4.1.10/kernel/sched/core.c.orig
+--- linux-4.1.10.orig/kernel/sched/core.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/kernel/sched/core.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,8389 @@
++/*
++ * kernel/sched/core.c
++ *
++ * Kernel scheduler and related syscalls
++ *
++ * Copyright (C) 1991-2002 Linus Torvalds
++ *
++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
++ * make semaphores SMP safe
++ * 1998-11-19 Implemented schedule_timeout() and related stuff
++ * by Andrea Arcangeli
++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
++ * hybrid priority-list and round-robin design with
++ * an array-switch method of distributing timeslices
++ * and per-CPU runqueues. Cleanups and useful suggestions
++ * by Davide Libenzi, preemptible kernel bits by Robert Love.
++ * 2003-09-03 Interactivity tuning by Con Kolivas.
++ * 2004-04-02 Scheduler domains code by Nick Piggin
++ * 2007-04-15 Work begun on replacing all interactivity tuning with a
++ * fair scheduling design by Con Kolivas.
++ * 2007-05-05 Load balancing (smp-nice) and other improvements
++ * by Peter Williams
++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
++ * Thomas Gleixner, Mike Kravetz
++ */
++
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/nmi.h>
++#include <linux/init.h>
++#include <linux/uaccess.h>
++#include <linux/highmem.h>
++#include <asm/mmu_context.h>
++#include <linux/interrupt.h>
++#include <linux/capability.h>
++#include <linux/completion.h>
++#include <linux/kernel_stat.h>
++#include <linux/debug_locks.h>
++#include <linux/perf_event.h>
++#include <linux/security.h>
++#include <linux/notifier.h>
++#include <linux/profile.h>
++#include <linux/freezer.h>
++#include <linux/vmalloc.h>
++#include <linux/blkdev.h>
++#include <linux/delay.h>
++#include <linux/pid_namespace.h>
++#include <linux/smp.h>
++#include <linux/threads.h>
++#include <linux/timer.h>
++#include <linux/rcupdate.h>
++#include <linux/cpu.h>
++#include <linux/cpuset.h>
++#include <linux/percpu.h>
++#include <linux/proc_fs.h>
++#include <linux/seq_file.h>
++#include <linux/sysctl.h>
++#include <linux/syscalls.h>
++#include <linux/times.h>
++#include <linux/tsacct_kern.h>
++#include <linux/kprobes.h>
++#include <linux/delayacct.h>
++#include <linux/unistd.h>
++#include <linux/pagemap.h>
++#include <linux/hrtimer.h>
++#include <linux/tick.h>
++#include <linux/debugfs.h>
++#include <linux/ctype.h>
++#include <linux/ftrace.h>
++#include <linux/slab.h>
++#include <linux/init_task.h>
++#include <linux/binfmts.h>
++#include <linux/context_tracking.h>
++#include <linux/compiler.h>
++
++#include <asm/switch_to.h>
++#include <asm/tlb.h>
++#include <asm/irq_regs.h>
++#include <asm/mutex.h>
++#ifdef CONFIG_PARAVIRT
++#include <asm/paravirt.h>
++#endif
++
++#include "sched.h"
++#include "../workqueue_internal.h"
++#include "../smpboot.h"
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++
++void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
++{
++ unsigned long delta;
++ ktime_t soft, hard, now;
++
++ for (;;) {
++ if (hrtimer_active(period_timer))
++ break;
++
++ now = hrtimer_cb_get_time(period_timer);
++ hrtimer_forward(period_timer, now, period);
++
++ soft = hrtimer_get_softexpires(period_timer);
++ hard = hrtimer_get_expires(period_timer);
++ delta = ktime_to_ns(ktime_sub(hard, soft));
++ __hrtimer_start_range_ns(period_timer, soft, delta,
++ HRTIMER_MODE_ABS_PINNED, 0);
++ }
++}
++
++DEFINE_MUTEX(sched_domains_mutex);
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++static void update_rq_clock_task(struct rq *rq, s64 delta);
++
++void update_rq_clock(struct rq *rq)
++{
++ s64 delta;
++
++ lockdep_assert_held(&rq->lock);
++
++ if (rq->clock_skip_update & RQCF_ACT_SKIP)
++ return;
++
++ delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++ if (delta < 0)
++ return;
++ rq->clock += delta;
++ update_rq_clock_task(rq, delta);
++}
++
++/*
++ * Debugging: various feature bits
++ */
++
++#define SCHED_FEAT(name, enabled) \
++ (1UL << __SCHED_FEAT_##name) * enabled |
++
++const_debug unsigned int sysctl_sched_features =
++#include "features.h"
++ 0;
++
++#undef SCHED_FEAT
++
++#ifdef CONFIG_SCHED_DEBUG
++#define SCHED_FEAT(name, enabled) \
++ #name ,
++
++static const char * const sched_feat_names[] = {
++#include "features.h"
++};
++
++#undef SCHED_FEAT
++
++static int sched_feat_show(struct seq_file *m, void *v)
++{
++ int i;
++
++ for (i = 0; i < __SCHED_FEAT_NR; i++) {
++ if (!(sysctl_sched_features & (1UL << i)))
++ seq_puts(m, "NO_");
++ seq_printf(m, "%s ", sched_feat_names[i]);
++ }
++ seq_puts(m, "\n");
++
++ return 0;
++}
++
++#ifdef HAVE_JUMP_LABEL
++
++#define jump_label_key__true STATIC_KEY_INIT_TRUE
++#define jump_label_key__false STATIC_KEY_INIT_FALSE
++
++#define SCHED_FEAT(name, enabled) \
++ jump_label_key__##enabled ,
++
++struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
++#include "features.h"
++};
++
++#undef SCHED_FEAT
++
++static void sched_feat_disable(int i)
++{
++ if (static_key_enabled(&sched_feat_keys[i]))
++ static_key_slow_dec(&sched_feat_keys[i]);
++}
++
++static void sched_feat_enable(int i)
++{
++ if (!static_key_enabled(&sched_feat_keys[i]))
++ static_key_slow_inc(&sched_feat_keys[i]);
++}
++#else
++static void sched_feat_disable(int i) { };
++static void sched_feat_enable(int i) { };
++#endif /* HAVE_JUMP_LABEL */
++
++static int sched_feat_set(char *cmp)
++{
++ int i;
++ int neg = 0;
++
++ if (strncmp(cmp, "NO_", 3) == 0) {
++ neg = 1;
++ cmp += 3;
++ }
++
++ for (i = 0; i < __SCHED_FEAT_NR; i++) {
++ if (strcmp(cmp, sched_feat_names[i]) == 0) {
++ if (neg) {
++ sysctl_sched_features &= ~(1UL << i);
++ sched_feat_disable(i);
++ } else {
++ sysctl_sched_features |= (1UL << i);
++ sched_feat_enable(i);
++ }
++ break;
++ }
++ }
++
++ return i;
++}
++
++static ssize_t
++sched_feat_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ char *cmp;
++ int i;
++ struct inode *inode;
++
++ if (cnt > 63)
++ cnt = 63;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = 0;
++ cmp = strstrip(buf);
++
++ /* Ensure the static_key remains in a consistent state */
++ inode = file_inode(filp);
++ mutex_lock(&inode->i_mutex);
++ i = sched_feat_set(cmp);
++ mutex_unlock(&inode->i_mutex);
++ if (i == __SCHED_FEAT_NR)
++ return -EINVAL;
++
++ *ppos += cnt;
++
++ return cnt;
++}
++
++static int sched_feat_open(struct inode *inode, struct file *filp)
++{
++ return single_open(filp, sched_feat_show, NULL);
++}
++
++static const struct file_operations sched_feat_fops = {
++ .open = sched_feat_open,
++ .write = sched_feat_write,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static __init int sched_init_debug(void)
++{
++ debugfs_create_file("sched_features", 0644, NULL, NULL,
++ &sched_feat_fops);
++
++ return 0;
++}
++late_initcall(sched_init_debug);
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * Number of tasks to iterate in a single balance run.
++ * Limited because this is done with IRQs disabled.
++ */
++const_debug unsigned int sysctl_sched_nr_migrate = 32;
++
++/*
++ * period over which we average the RT time consumption, measured
++ * in ms.
++ *
++ * default: 1s
++ */
++const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
++
++/*
++ * period over which we measure -rt task cpu usage in us.
++ * default: 1s
++ */
++unsigned int sysctl_sched_rt_period = 1000000;
++
++__read_mostly int scheduler_running;
++
++/*
++ * part of the period that we allow rt tasks to run in us.
++ * default: 0.95s
++ */
++int sysctl_sched_rt_runtime = 950000;
++
++/* cpus with isolated domains */
++cpumask_var_t cpu_isolated_map;
++
++/*
++ * this_rq_lock - lock this runqueue and disable interrupts.
++ */
++static struct rq *this_rq_lock(void)
++ __acquires(rq->lock)
++{
++ struct rq *rq;
++
++ local_irq_disable();
++ rq = this_rq();
++ raw_spin_lock(&rq->lock);
++
++ return rq;
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++ if (hrtimer_active(&rq->hrtick_timer))
++ hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++ struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++ raw_spin_lock(&rq->lock);
++ update_rq_clock(rq);
++ rq->curr->sched_class->task_tick(rq, rq->curr, 1);
++ raw_spin_unlock(&rq->lock);
++
++ return HRTIMER_NORESTART;
++}
++
++#ifdef CONFIG_SMP
++
++static int __hrtick_restart(struct rq *rq)
++{
++ struct hrtimer *timer = &rq->hrtick_timer;
++ ktime_t time = hrtimer_get_softexpires(timer);
++
++ return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++ struct rq *rq = arg;
++
++ raw_spin_lock(&rq->lock);
++ __hrtick_restart(rq);
++ rq->hrtick_csd_pending = 0;
++ raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++ struct hrtimer *timer = &rq->hrtick_timer;
++ ktime_t time;
++ s64 delta;
++
++ /*
++ * Don't schedule slices shorter than 10000ns, that just
++ * doesn't make sense and can cause timer DoS.
++ */
++ delta = max_t(s64, delay, 10000LL);
++ time = ktime_add_ns(timer->base->get_time(), delta);
++
++ hrtimer_set_expires(timer, time);
++
++ if (rq == this_rq()) {
++ __hrtick_restart(rq);
++ } else if (!rq->hrtick_csd_pending) {
++ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++ rq->hrtick_csd_pending = 1;
++ }
++}
++
++static int
++hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
++{
++ int cpu = (int)(long)hcpu;
++
++ switch (action) {
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ case CPU_DOWN_PREPARE:
++ case CPU_DOWN_PREPARE_FROZEN:
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ hrtick_clear(cpu_rq(cpu));
++ return NOTIFY_OK;
++ }
++
++ return NOTIFY_DONE;
++}
++
++static __init void init_hrtick(void)
++{
++ hotcpu_notifier(hotplug_hrtick, 0);
++}
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++ /*
++ * Don't schedule slices shorter than 10000ns, that just
++ * doesn't make sense. Rely on vruntime for fairness.
++ */
++ delay = max_t(u64, delay, 10000LL);
++ __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
++ HRTIMER_MODE_REL_PINNED, 0);
++}
++
++static inline void init_hrtick(void)
++{
++}
++#endif /* CONFIG_SMP */
++
++static void init_rq_hrtick(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++ rq->hrtick_csd_pending = 0;
++
++ rq->hrtick_csd.flags = 0;
++ rq->hrtick_csd.func = __hrtick_start;
++ rq->hrtick_csd.info = rq;
++#endif
++
++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rq->hrtick_timer.function = hrtick;
++}
++#else /* CONFIG_SCHED_HRTICK */
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void init_rq_hrtick(struct rq *rq)
++{
++}
++
++static inline void init_hrtick(void)
++{
++}
++#endif /* CONFIG_SCHED_HRTICK */
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, val) \
++({ typeof(*(ptr)) __old, __val = *(ptr); \
++ for (;;) { \
++ __old = cmpxchg((ptr), __val, __val | (val)); \
++ if (__old == __val) \
++ break; \
++ __val = __old; \
++ } \
++ __old; \
++})
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ struct thread_info *ti = task_thread_info(p);
++ typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
++
++ for (;;) {
++ if (!(val & _TIF_POLLING_NRFLAG))
++ return false;
++ if (val & _TIF_NEED_RESCHED)
++ return true;
++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
++ if (old == val)
++ break;
++ val = old;
++ }
++ return true;
++}
++
++#else
++static bool set_nr_and_not_polling(struct task_struct *p)
++{
++ set_tsk_need_resched(p);
++ return true;
++}
++
++#ifdef CONFIG_SMP
++static bool set_nr_if_polling(struct task_struct *p)
++{
++ return false;
++}
++#endif
++#endif
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++ struct task_struct *curr = rq->curr;
++ int cpu;
++
++ lockdep_assert_held(&rq->lock);
++
++ if (test_tsk_need_resched(curr))
++ return;
++
++ cpu = cpu_of(rq);
++
++ if (cpu == smp_processor_id()) {
++ set_tsk_need_resched(curr);
++ set_preempt_need_resched();
++ return;
++ }
++
++ if (set_nr_and_not_polling(curr))
++ smp_send_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++ return;
++ resched_curr(rq);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++/*
++ * In the semi idle case, use the nearest busy cpu for migrating timers
++ * from an idle cpu. This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle cpu would add more delay to the timers than intended
++ * (as that cpu's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(int pinned)
++{
++ int cpu = smp_processor_id();
++ int i;
++ struct sched_domain *sd;
++
++ if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
++ return cpu;
++
++ rcu_read_lock();
++ for_each_domain(cpu, sd) {
++ for_each_cpu(i, sched_domain_span(sd)) {
++ if (!idle_cpu(i)) {
++ cpu = i;
++ goto unlock;
++ }
++ }
++ }
++unlock:
++ rcu_read_unlock();
++ return cpu;
++}
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static void wake_up_idle_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (cpu == smp_processor_id())
++ return;
++
++ if (set_nr_and_not_polling(rq->idle))
++ smp_send_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static bool wake_up_full_nohz_cpu(int cpu)
++{
++ /*
++ * We just need the target to call irq_exit() and re-evaluate
++ * the next tick. The nohz full kick at least implies that.
++ * If needed we can still optimize that later with an
++ * empty IRQ.
++ */
++ if (tick_nohz_full_cpu(cpu)) {
++ if (cpu != smp_processor_id() ||
++ tick_nohz_tick_stopped())
++ tick_nohz_full_kick_cpu(cpu);
++ return true;
++ }
++
++ return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++ if (!wake_up_full_nohz_cpu(cpu))
++ wake_up_idle_cpu(cpu);
++}
++
++static inline bool got_nohz_idle_kick(void)
++{
++ int cpu = smp_processor_id();
++
++ if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
++ return false;
++
++ if (idle_cpu(cpu) && !need_resched())
++ return true;
++
++ /*
++	 * We can't run Idle Load Balance on this CPU at this time, so we
++	 * cancel it and clear NOHZ_BALANCE_KICK.
++ */
++ clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
++ return false;
++}
++
++#else /* CONFIG_NO_HZ_COMMON */
++
++static inline bool got_nohz_idle_kick(void)
++{
++ return false;
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++
++#ifdef CONFIG_NO_HZ_FULL
++bool sched_can_stop_tick(void)
++{
++ /*
++ * FIFO realtime policy runs the highest priority task. Other runnable
++ * tasks are of a lower priority. The scheduler tick does nothing.
++ */
++ if (current->policy == SCHED_FIFO)
++ return true;
++
++ /*
++ * Round-robin realtime tasks time slice with other tasks at the same
++ * realtime priority. Is this task the only one at this priority?
++ */
++ if (current->policy == SCHED_RR) {
++ struct sched_rt_entity *rt_se = &current->rt;
++
++ return rt_se->run_list.prev == rt_se->run_list.next;
++ }
++
++ /*
++	 * More than one running task needs preemption.
++ * nr_running update is assumed to be visible
++ * after IPI is sent from wakers.
++ */
++ if (this_rq()->nr_running > 1)
++ return false;
++
++ return true;
++}
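++
++/*
++ * Editor's note (illustrative, not part of the original patch): the
++ * SCHED_RR test above relies on list_head geometry. rt_se->run_list is
++ * linked into the run list for its priority; when it is the only entry
++ * there, both its ->prev and ->next point at the list head, so
++ * "prev == next" is a lockless way of asking "am I alone at this
++ * priority?". With two or more entries the two pointers differ.
++ */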
++#endif /* CONFIG_NO_HZ_FULL */
++
++void sched_avg_update(struct rq *rq)
++{
++ s64 period = sched_avg_period();
++
++ while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
++ /*
++ * Inline assembly required to prevent the compiler
++ * optimising this loop into a divmod call.
++ * See __iter_div_u64_rem() for another example of this.
++ */
++ asm("" : "+rm" (rq->age_stamp));
++ rq->age_stamp += period;
++ rq->rt_avg /= 2;
++ }
++}
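++
++/*
++ * Editor's note (illustrative, not part of the original patch): each loop
++ * iteration above ages rq->rt_avg by one sched_avg_period(), i.e. the
++ * average is halved per elapsed period. For example, if the rq clock is
++ * three periods ahead of rq->age_stamp, rt_avg ends up divided by 8 and
++ * age_stamp advances by exactly three periods.
++ */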
++
++#endif /* CONFIG_SMP */
++
++#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
++ (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
++/*
++ * Iterate task_group tree rooted at *from, calling @down when first entering a
++ * node and @up when leaving it for the final time.
++ *
++ * Caller must hold rcu_lock or sufficient equivalent.
++ */
++int walk_tg_tree_from(struct task_group *from,
++ tg_visitor down, tg_visitor up, void *data)
++{
++ struct task_group *parent, *child;
++ int ret;
++
++ parent = from;
++
++down:
++ ret = (*down)(parent, data);
++ if (ret)
++ goto out;
++ list_for_each_entry_rcu(child, &parent->children, siblings) {
++ parent = child;
++ goto down;
++
++up:
++ continue;
++ }
++ ret = (*up)(parent, data);
++ if (ret || parent == from)
++ goto out;
++
++ child = parent;
++ parent = parent->parent;
++ if (parent)
++ goto up;
++out:
++ return ret;
++}
++
++int tg_nop(struct task_group *tg, void *data)
++{
++ return 0;
++}
++#endif
++
++static void set_load_weight(struct task_struct *p)
++{
++ int prio = p->static_prio - MAX_RT_PRIO;
++ struct load_weight *load = &p->se.load;
++
++ /*
++ * SCHED_IDLE tasks get minimal weight:
++ */
++ if (p->policy == SCHED_IDLE) {
++ load->weight = scale_load(WEIGHT_IDLEPRIO);
++ load->inv_weight = WMULT_IDLEPRIO;
++ return;
++ }
++
++ load->weight = scale_load(prio_to_weight[prio]);
++ load->inv_weight = prio_to_wmult[prio];
++}
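++
++/*
++ * Editor's note (illustrative, not part of the original patch): the
++ * prio_to_weight[] table indexed above maps nice levels to load weights,
++ * with nice 0 corresponding to a weight of 1024 and neighbouring nice
++ * levels differing by roughly a factor of 1.25, so that each nice step
++ * shifts CPU share by about 10% between two competing tasks.
++ */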
++
++static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ update_rq_clock(rq);
++ sched_info_queued(rq, p);
++ p->sched_class->enqueue_task(rq, p, flags);
++}
++
++static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ update_rq_clock(rq);
++ sched_info_dequeued(rq, p);
++ p->sched_class->dequeue_task(rq, p, flags);
++}
++
++void activate_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible--;
++
++ enqueue_task(rq, p, flags);
++}
++
++void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
++{
++ if (task_contributes_to_load(p))
++ rq->nr_uninterruptible++;
++
++ dequeue_task(rq, p, flags);
++}
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
++ s64 steal = 0, irq_delta = 0;
++#endif
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++ /*
++ * Since irq_time is only updated on {soft,}irq_exit, we might run into
++ * this case when a previous update_rq_clock() happened inside a
++ * {soft,}irq region.
++ *
++ * When this happens, we stop ->clock_task and only update the
++ * prev_irq_time stamp to account for the part that fit, so that a next
++ * update will consume the rest. This ensures ->clock_task is
++ * monotonic.
++ *
++	 * It does however cause some slight misattribution of {soft,}irq
++ * time, a more accurate solution would be to update the irq_time using
++ * the current rq->clock timestamp, except that would require using
++ * atomic ops.
++ */
++ if (irq_delta > delta)
++ irq_delta = delta;
++
++ rq->prev_irq_time += irq_delta;
++ delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++ if (static_key_false((&paravirt_steal_rq_enabled))) {
++ steal = paravirt_steal_clock(cpu_of(rq));
++ steal -= rq->prev_steal_time_rq;
++
++ if (unlikely(steal > delta))
++ steal = delta;
++
++ rq->prev_steal_time_rq += steal;
++ delta -= steal;
++ }
++#endif
++
++ rq->clock_task += delta;
++
++#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
++ if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
++ sched_rt_avg_update(rq, irq_delta + steal);
++#endif
++}
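++
++/*
++ * Worked example (editor's addition, not part of the original patch):
++ * with both accounting options enabled, ->clock_task only advances by the
++ * time the task itself could have run. E.g. for delta = 1000us with
++ * irq_delta = 300us and steal = 100us:
++ *
++ *	rq->clock_task += 1000us - 300us - 100us = 600us
++ *
++ * while the remaining 400us is fed into sched_rt_avg_update() so load
++ * balancing can account for capacity lost to irqs and the hypervisor.
++ */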
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
++ struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++ if (stop) {
++ /*
++		 * Make it appear like a SCHED_FIFO task, it's something
++ * userspace knows about and won't get confused about.
++ *
++ * Also, it will make PI more or less work without too
++ * much confusion -- but then, stop work should not
++ * rely on PI working anyway.
++ */
++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
++
++ stop->sched_class = &stop_sched_class;
++ }
++
++ cpu_rq(cpu)->stop = stop;
++
++ if (old_stop) {
++ /*
++ * Reset it back to a normal scheduling class so that
++ * it can die in pieces.
++ */
++ old_stop->sched_class = &rt_sched_class;
++ }
++}
++
++/*
++ * __normal_prio - return the priority that is based on the static prio
++ */
++static inline int __normal_prio(struct task_struct *p)
++{
++ return p->static_prio;
++}
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++ int prio;
++
++ if (task_has_dl_policy(p))
++ prio = MAX_DL_PRIO-1;
++ else if (task_has_rt_policy(p))
++ prio = MAX_RT_PRIO-1 - p->rt_priority;
++ else
++ prio = __normal_prio(p);
++ return prio;
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks, or might be boosted by
++ * interactivity modifiers. Will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++ p->normal_prio = normal_prio(p);
++ /*
++ * If we are RT tasks or we were boosted to RT priority,
++ * keep the priority unchanged. Otherwise, update priority
++ * to the normal priority:
++ */
++ if (!rt_prio(p->prio))
++ return p->normal_prio;
++ return p->prio;
++}
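++
++/*
++ * Editor's note (illustrative, not part of the original patch): on the
++ * shared priority scale used here, lower numbers mean higher priority.
++ * normal_prio() yields -1 (MAX_DL_PRIO-1) for deadline tasks,
++ * 99 - rt_priority (i.e. 0..98) for realtime tasks, and the static
++ * priority 100..139 (nice -20..19) for normal tasks; effective_prio()
++ * then keeps p->prio untouched while the task is boosted into the RT
++ * range.
++ */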
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++ return cpu_curr(task_cpu(p)) == p;
++}
++
++/*
++ * Can drop rq->lock because sched_class::switched_from() methods may drop it.
++ */
++static inline void check_class_changed(struct rq *rq, struct task_struct *p,
++ const struct sched_class *prev_class,
++ int oldprio)
++{
++ if (prev_class != p->sched_class) {
++ if (prev_class->switched_from)
++ prev_class->switched_from(rq, p);
++		/* Possible rq->lock 'hole'. */
++ p->sched_class->switched_to(rq, p);
++ } else if (oldprio != p->prio || dl_task(p))
++ p->sched_class->prio_changed(rq, p, oldprio);
++}
++
++void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
++{
++ const struct sched_class *class;
++
++ if (p->sched_class == rq->curr->sched_class) {
++ rq->curr->sched_class->check_preempt_curr(rq, p, flags);
++ } else {
++ for_each_class(class) {
++ if (class == rq->curr->sched_class)
++ break;
++ if (class == p->sched_class) {
++ resched_curr(rq);
++ break;
++ }
++ }
++ }
++
++ /*
++ * A queue event has occurred, and we're going to schedule. In
++ * this case, we can save a useless back to back clock update.
++ */
++ if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
++ rq_clock_skip_update(rq, true);
++}
++
++#ifdef CONFIG_SMP
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++ /*
++ * We should never call set_task_cpu() on a blocked task,
++ * ttwu() will sort out the placement.
++ */
++ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
++ !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++ /*
++ * The caller should hold either p->pi_lock or rq->lock, when changing
++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++ *
++ * sched_move_task() holds both and thus holding either pins the cgroup,
++ * see task_group().
++ *
++ * Furthermore, all task_rq users should acquire both locks, see
++ * task_rq_lock().
++ */
++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++ lockdep_is_held(&task_rq(p)->lock)));
++#endif
++#endif
++
++ trace_sched_migrate_task(p, new_cpu);
++
++ if (task_cpu(p) != new_cpu) {
++ if (p->sched_class->migrate_task_rq)
++ p->sched_class->migrate_task_rq(p, new_cpu);
++ p->se.nr_migrations++;
++ perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
++ }
++
++ __set_task_cpu(p, new_cpu);
++}
++
++static void __migrate_swap_task(struct task_struct *p, int cpu)
++{
++ if (task_on_rq_queued(p)) {
++ struct rq *src_rq, *dst_rq;
++
++ src_rq = task_rq(p);
++ dst_rq = cpu_rq(cpu);
++
++ deactivate_task(src_rq, p, 0);
++ set_task_cpu(p, cpu);
++ activate_task(dst_rq, p, 0);
++ check_preempt_curr(dst_rq, p, 0);
++ } else {
++ /*
++ * Task isn't running anymore; make it appear like we migrated
++ * it before it went to sleep. This means on wakeup we make the
++		 * previous cpu our target instead of where it really is.
++ */
++ p->wake_cpu = cpu;
++ }
++}
++
++struct migration_swap_arg {
++ struct task_struct *src_task, *dst_task;
++ int src_cpu, dst_cpu;
++};
++
++static int migrate_swap_stop(void *data)
++{
++ struct migration_swap_arg *arg = data;
++ struct rq *src_rq, *dst_rq;
++ int ret = -EAGAIN;
++
++ src_rq = cpu_rq(arg->src_cpu);
++ dst_rq = cpu_rq(arg->dst_cpu);
++
++ double_raw_lock(&arg->src_task->pi_lock,
++ &arg->dst_task->pi_lock);
++ double_rq_lock(src_rq, dst_rq);
++ if (task_cpu(arg->dst_task) != arg->dst_cpu)
++ goto unlock;
++
++ if (task_cpu(arg->src_task) != arg->src_cpu)
++ goto unlock;
++
++ if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
++ goto unlock;
++
++ if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
++ goto unlock;
++
++ __migrate_swap_task(arg->src_task, arg->dst_cpu);
++ __migrate_swap_task(arg->dst_task, arg->src_cpu);
++
++ ret = 0;
++
++unlock:
++ double_rq_unlock(src_rq, dst_rq);
++ raw_spin_unlock(&arg->dst_task->pi_lock);
++ raw_spin_unlock(&arg->src_task->pi_lock);
++
++ return ret;
++}
++
++/*
++ * Cross migrate two tasks
++ */
++int migrate_swap(struct task_struct *cur, struct task_struct *p)
++{
++ struct migration_swap_arg arg;
++ int ret = -EINVAL;
++
++ arg = (struct migration_swap_arg){
++ .src_task = cur,
++ .src_cpu = task_cpu(cur),
++ .dst_task = p,
++ .dst_cpu = task_cpu(p),
++ };
++
++ if (arg.src_cpu == arg.dst_cpu)
++ goto out;
++
++ /*
++ * These three tests are all lockless; this is OK since all of them
++ * will be re-checked with proper locks held further down the line.
++ */
++ if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
++ goto out;
++
++ if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
++ goto out;
++
++ if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
++ goto out;
++
++ trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
++ ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
++
++out:
++ return ret;
++}
++
++struct migration_arg {
++ struct task_struct *task;
++ int dest_cpu;
++};
++
++static int migration_cpu_stop(void *data);
++
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * If @match_state is nonzero, it's the @p->state value just checked and
++ * not expected to change. If it changes, i.e. @p might have woken up,
++ * then return zero. When we succeed in waiting for @p to be off its CPU,
++ * we return a positive number (its total switch count). If a second call
++ * a short while later returns the same number, the caller can be sure that
++ * @p has remained unscheduled the whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, long match_state)
++{
++ unsigned long flags;
++ int running, queued;
++ unsigned long ncsw;
++ struct rq *rq;
++
++ for (;;) {
++ /*
++ * We do the initial early heuristics without holding
++ * any task-queue locks at all. We'll only try to get
++ * the runqueue lock when things look like they will
++ * work out!
++ */
++ rq = task_rq(p);
++
++ /*
++ * If the task is actively running on another CPU
++ * still, just relax and busy-wait without holding
++ * any locks.
++ *
++ * NOTE! Since we don't hold any locks, it's not
++ * even sure that "rq" stays as the right runqueue!
++ * But we don't care, since "task_running()" will
++ * return false if the runqueue has changed and p
++ * is actually now running somewhere else!
++ */
++ while (task_running(rq, p)) {
++ if (match_state && unlikely(p->state != match_state))
++ return 0;
++ cpu_relax();
++ }
++
++ /*
++ * Ok, time to look more closely! We need the rq
++ * lock now, to be *sure*. If we're wrong, we'll
++ * just go back and repeat.
++ */
++ rq = task_rq_lock(p, &flags);
++ trace_sched_wait_task(p);
++ running = task_running(rq, p);
++ queued = task_on_rq_queued(p);
++ ncsw = 0;
++ if (!match_state || p->state == match_state)
++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++ task_rq_unlock(rq, p, &flags);
++
++ /*
++ * If it changed from the expected state, bail out now.
++ */
++ if (unlikely(!ncsw))
++ break;
++
++ /*
++ * Was it really running after all now that we
++ * checked with the proper locks actually held?
++ *
++ * Oops. Go back and try again..
++ */
++ if (unlikely(running)) {
++ cpu_relax();
++ continue;
++ }
++
++ /*
++ * It's not enough that it's not actively running,
++ * it must be off the runqueue _entirely_, and not
++ * preempted!
++ *
++ * So if it was still runnable (but just not actively
++ * running right now), it's preempted, and we should
++ * yield - it could be a while.
++ */
++ if (unlikely(queued)) {
++ ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&to, HRTIMER_MODE_REL);
++ continue;
++ }
++
++ /*
++ * Ahh, all good. It wasn't running, and it wasn't
++ * runnable, which means that it will never become
++ * running in the future either. We're all done!
++ */
++ break;
++ }
++
++ return ncsw;
++}
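++
++/*
++ * Usage sketch (editor's addition, not part of the original patch), e.g.
++ * for a ptrace-style caller that wants to be sure the tracee stayed off
++ * the CPU between two points:
++ *
++ *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
++ *	...
++ *	if (ncsw && wait_task_inactive(p, TASK_TRACED) == ncsw)
++ *		;	/* p has not been scheduled in the meantime */
++ *
++ * A return of 0 means @p's state no longer matched @match_state, i.e. it
++ * may have been woken up, and the caller has to re-check its own state
++ * machine.
++ */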
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++ int cpu;
++
++ preempt_disable();
++ cpu = task_cpu(p);
++ if ((cpu != smp_processor_id()) && task_curr(p))
++ smp_send_reschedule(cpu);
++ preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_SMP
++/*
++ * ->cpus_allowed is protected by both rq->lock and p->pi_lock
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++ int nid = cpu_to_node(cpu);
++ const struct cpumask *nodemask = NULL;
++ enum { cpuset, possible, fail } state = cpuset;
++ int dest_cpu;
++
++ /*
++ * If the node that the cpu is on has been offlined, cpu_to_node()
++ * will return -1. There is no cpu on the node, and we should
++	 * select a cpu on another node.
++ */
++ if (nid != -1) {
++ nodemask = cpumask_of_node(nid);
++
++ /* Look for allowed, online CPU in same node. */
++ for_each_cpu(dest_cpu, nodemask) {
++ if (!cpu_online(dest_cpu))
++ continue;
++ if (!cpu_active(dest_cpu))
++ continue;
++ if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
++ return dest_cpu;
++ }
++ }
++
++ for (;;) {
++ /* Any allowed, online CPU? */
++ for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
++ if (!cpu_online(dest_cpu))
++ continue;
++ if (!cpu_active(dest_cpu))
++ continue;
++ goto out;
++ }
++
++ switch (state) {
++ case cpuset:
++ /* No more Mr. Nice Guy. */
++ cpuset_cpus_allowed_fallback(p);
++ state = possible;
++ break;
++
++ case possible:
++ do_set_cpus_allowed(p, cpu_possible_mask);
++ state = fail;
++ break;
++
++ case fail:
++ BUG();
++ break;
++ }
++ }
++
++out:
++ if (state != cpuset) {
++ /*
++ * Don't tell them about moving exiting tasks or
++ * kernel threads (both mm NULL), since they never
++		 * leave the kernel.
++ */
++ if (p->mm && printk_ratelimit()) {
++ printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++ task_pid_nr(p), p->comm, cpu);
++ }
++ }
++
++ return dest_cpu;
++}
++
++/*
++ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
++ */
++static inline
++int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
++{
++ if (p->nr_cpus_allowed > 1)
++ cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
++
++ /*
++ * In order not to call set_task_cpu() on a blocking task we need
++ * to rely on ttwu() to place the task on a valid ->cpus_allowed
++ * cpu.
++ *
++ * Since this is common to all placement strategies, this lives here.
++ *
++ * [ this allows ->select_task() to simply return task_cpu(p) and
++ * not worry about this generic constraint ]
++ */
++ if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
++ !cpu_online(cpu)))
++ cpu = select_fallback_rq(task_cpu(p), p);
++
++ return cpu;
++}
++
++static void update_avg(u64 *avg, u64 sample)
++{
++ s64 diff = sample - *avg;
++ *avg += diff >> 3;
++}
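++
++/*
++ * Editor's note (illustrative, not part of the original patch):
++ * update_avg() maintains an exponentially weighted moving average with a
++ * 1/8 weight for the newest sample:
++ *
++ *	avg' = avg + (sample - avg) / 8  =  7/8 * avg + 1/8 * sample
++ *
++ * e.g. avg = 800, sample = 1600 gives diff = 800 and avg' = 900. It is
++ * used below to smooth rq->avg_idle from the observed idle durations.
++ */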
++#endif
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++#ifdef CONFIG_SCHEDSTATS
++ struct rq *rq = this_rq();
++
++#ifdef CONFIG_SMP
++ int this_cpu = smp_processor_id();
++
++ if (cpu == this_cpu) {
++ schedstat_inc(rq, ttwu_local);
++ schedstat_inc(p, se.statistics.nr_wakeups_local);
++ } else {
++ struct sched_domain *sd;
++
++ schedstat_inc(p, se.statistics.nr_wakeups_remote);
++ rcu_read_lock();
++ for_each_domain(this_cpu, sd) {
++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
++ schedstat_inc(sd, ttwu_wake_remote);
++ break;
++ }
++ }
++ rcu_read_unlock();
++ }
++
++ if (wake_flags & WF_MIGRATED)
++ schedstat_inc(p, se.statistics.nr_wakeups_migrate);
++
++#endif /* CONFIG_SMP */
++
++ schedstat_inc(rq, ttwu_count);
++ schedstat_inc(p, se.statistics.nr_wakeups);
++
++ if (wake_flags & WF_SYNC)
++ schedstat_inc(p, se.statistics.nr_wakeups_sync);
++
++#endif /* CONFIG_SCHEDSTATS */
++}
++
++static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
++{
++ activate_task(rq, p, en_flags);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++
++ /* if a worker is waking up, notify workqueue */
++ if (p->flags & PF_WQ_WORKER)
++ wq_worker_waking_up(p, cpu_of(rq));
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++ check_preempt_curr(rq, p, wake_flags);
++ trace_sched_wakeup(p, true);
++
++ p->state = TASK_RUNNING;
++#ifdef CONFIG_SMP
++ if (p->sched_class->task_woken)
++ p->sched_class->task_woken(rq, p);
++
++ if (rq->idle_stamp) {
++ u64 delta = rq_clock(rq) - rq->idle_stamp;
++ u64 max = 2*rq->max_idle_balance_cost;
++
++ update_avg(&rq->avg_idle, delta);
++
++ if (rq->avg_idle > max)
++ rq->avg_idle = max;
++
++ rq->idle_stamp = 0;
++ }
++#endif
++}
++
++static void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++#ifdef CONFIG_SMP
++ if (p->sched_contributes_to_load)
++ rq->nr_uninterruptible--;
++#endif
++
++ ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
++ ttwu_do_wakeup(rq, p, wake_flags);
++}
++
++/*
++ * Called in case the task @p isn't fully descheduled from its runqueue,
++ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
++ * since all we need to do is flip p->state to TASK_RUNNING, since
++ * the task is still ->on_rq.
++ */
++static int ttwu_remote(struct task_struct *p, int wake_flags)
++{
++ struct rq *rq;
++ int ret = 0;
++
++ rq = __task_rq_lock(p);
++ if (task_on_rq_queued(p)) {
++ /* check_preempt_curr() may use rq clock */
++ update_rq_clock(rq);
++ ttwu_do_wakeup(rq, p, wake_flags);
++ ret = 1;
++ }
++ __task_rq_unlock(rq);
++
++ return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void)
++{
++ struct rq *rq = this_rq();
++ struct llist_node *llist = llist_del_all(&rq->wake_list);
++ struct task_struct *p;
++ unsigned long flags;
++
++ if (!llist)
++ return;
++
++ raw_spin_lock_irqsave(&rq->lock, flags);
++
++ while (llist) {
++ p = llist_entry(llist, struct task_struct, wake_entry);
++ llist = llist_next(llist);
++ ttwu_do_activate(rq, p, 0);
++ }
++
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++void scheduler_ipi(void)
++{
++ /*
++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
++ * TIF_NEED_RESCHED remotely (for the first time) will also send
++ * this IPI.
++ */
++ preempt_fold_need_resched();
++
++ if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
++ return;
++
++ /*
++ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
++ * traditionally all their work was done from the interrupt return
++ * path. Now that we actually do some work, we need to make sure
++ * we do call them.
++ *
++ * Some archs already do call them, luckily irq_enter/exit nest
++ * properly.
++ *
++ * Arguably we should visit all archs and update all handlers,
++ * however a fair share of IPIs are still resched only so this would
++ * somewhat pessimize the simple resched case.
++ */
++ irq_enter();
++ sched_ttwu_pending();
++
++ /*
++ * Check if someone kicked us for doing the nohz idle load balance.
++ */
++ if (unlikely(got_nohz_idle_kick())) {
++ this_rq()->idle_balance = 1;
++ raise_softirq_irqoff(SCHED_SOFTIRQ);
++ }
++ irq_exit();
++}
++
++static void ttwu_queue_remote(struct task_struct *p, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
++ if (!set_nr_if_polling(rq->idle))
++ smp_send_reschedule(cpu);
++ else
++ trace_sched_wake_idle_without_ipi(cpu);
++ }
++}
++
++void wake_up_if_idle(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ rcu_read_lock();
++
++ if (!is_idle_task(rcu_dereference(rq->curr)))
++ goto out;
++
++ if (set_nr_if_polling(rq->idle)) {
++ trace_sched_wake_idle_without_ipi(cpu);
++ } else {
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ if (is_idle_task(rq->curr))
++ smp_send_reschedule(cpu);
++ /* Else cpu is not in idle, do nothing here */
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ }
++
++out:
++ rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#endif /* CONFIG_SMP */
++
++static void ttwu_queue(struct task_struct *p, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++#if defined(CONFIG_SMP)
++ if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
++ sched_clock_cpu(cpu); /* sync clocks x-cpu */
++ ttwu_queue_remote(p, cpu);
++ return;
++ }
++#endif
++
++ raw_spin_lock(&rq->lock);
++ ttwu_do_activate(rq, p, 0);
++ raw_spin_unlock(&rq->lock);
++}
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Put it on the run-queue if it's not already there. The "current"
++ * thread is always on the run-queue (except when the actual
++ * re-schedule is in progress), and as such you're allowed to do
++ * the simpler "current->state = TASK_RUNNING" to mark yourself
++ * runnable without the overhead of this.
++ *
++ * Return: %true if @p was woken up, %false if it was already running
++ * or @state didn't match @p's state.
++ */
++static int
++try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
++{
++ unsigned long flags;
++ int cpu, success = 0;
++
++ /*
++ * If we are going to wake up a thread waiting for CONDITION we
++ * need to ensure that CONDITION=1 done by the caller can not be
++ * reordered with p->state check below. This pairs with mb() in
++ * set_current_state() the waiting thread does.
++ */
++ smp_mb__before_spinlock();
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ if (!(p->state & state))
++ goto out;
++
++ success = 1; /* we're going to change ->state */
++ cpu = task_cpu(p);
++
++ if (p->on_rq && ttwu_remote(p, wake_flags))
++ goto stat;
++
++#ifdef CONFIG_SMP
++ /*
++ * If the owning (remote) cpu is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++ */
++ while (p->on_cpu)
++ cpu_relax();
++ /*
++ * Pairs with the smp_wmb() in finish_lock_switch().
++ */
++ smp_rmb();
++
++ p->sched_contributes_to_load = !!task_contributes_to_load(p);
++ p->state = TASK_WAKING;
++
++ if (p->sched_class->task_waking)
++ p->sched_class->task_waking(p);
++
++ cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
++ if (task_cpu(p) != cpu) {
++ wake_flags |= WF_MIGRATED;
++ set_task_cpu(p, cpu);
++ }
++#endif /* CONFIG_SMP */
++
++ ttwu_queue(p, cpu);
++stat:
++ ttwu_stat(p, cpu, wake_flags);
++out:
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++ return success;
++}
++
++/**
++ * try_to_wake_up_local - try to wake up a local task with rq lock held
++ * @p: the thread to be awakened
++ *
++ * Put @p on the run-queue if it's not already there. The caller must
++ * ensure that this_rq() is locked, @p is bound to this_rq() and not
++ * the current task.
++ */
++static void try_to_wake_up_local(struct task_struct *p)
++{
++ struct rq *rq = task_rq(p);
++
++ if (WARN_ON_ONCE(rq != this_rq()) ||
++ WARN_ON_ONCE(p == current))
++ return;
++
++ lockdep_assert_held(&rq->lock);
++
++ if (!raw_spin_trylock(&p->pi_lock)) {
++ raw_spin_unlock(&rq->lock);
++ raw_spin_lock(&p->pi_lock);
++ raw_spin_lock(&rq->lock);
++ }
++
++ if (!(p->state & TASK_NORMAL))
++ goto out;
++
++ if (!task_on_rq_queued(p))
++ ttwu_activate(rq, p, ENQUEUE_WAKEUP);
++
++ ttwu_do_wakeup(rq, p, 0);
++ ttwu_stat(p, smp_processor_id(), 0);
++out:
++ raw_spin_unlock(&p->pi_lock);
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * It may be assumed that this function implies a write memory barrier before
++ * changing the task state if and only if any tasks are woken up.
++ */
++int wake_up_process(struct task_struct *p)
++{
++ WARN_ON(task_is_stopped_or_traced(p));
++ return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++ return try_to_wake_up(p, state, 0);
++}
++
++/*
++ * This function clears the sched_dl_entity static params.
++ */
++void __dl_clear_params(struct task_struct *p)
++{
++ struct sched_dl_entity *dl_se = &p->dl;
++
++ dl_se->dl_runtime = 0;
++ dl_se->dl_deadline = 0;
++ dl_se->dl_period = 0;
++ dl_se->flags = 0;
++ dl_se->dl_bw = 0;
++
++ dl_se->dl_throttled = 0;
++ dl_se->dl_new = 1;
++ dl_se->dl_yielded = 0;
++}
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++ p->on_rq = 0;
++
++ p->se.on_rq = 0;
++ p->se.exec_start = 0;
++ p->se.sum_exec_runtime = 0;
++ p->se.prev_sum_exec_runtime = 0;
++ p->se.nr_migrations = 0;
++ p->se.vruntime = 0;
++#ifdef CONFIG_SMP
++ p->se.avg.decay_count = 0;
++#endif
++ INIT_LIST_HEAD(&p->se.group_node);
++
++#ifdef CONFIG_SCHEDSTATS
++ memset(&p->se.statistics, 0, sizeof(p->se.statistics));
++#endif
++
++ RB_CLEAR_NODE(&p->dl.rb_node);
++ init_dl_task_timer(&p->dl);
++ __dl_clear_params(p);
++
++ INIT_LIST_HEAD(&p->rt.run_list);
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_NUMA_BALANCING
++ if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
++ p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
++ p->mm->numa_scan_seq = 0;
++ }
++
++ if (clone_flags & CLONE_VM)
++ p->numa_preferred_nid = current->numa_preferred_nid;
++ else
++ p->numa_preferred_nid = -1;
++
++ p->node_stamp = 0ULL;
++ p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
++ p->numa_scan_period = sysctl_numa_balancing_scan_delay;
++ p->numa_work.next = &p->numa_work;
++ p->numa_faults = NULL;
++ p->last_task_numa_placement = 0;
++ p->last_sum_exec_runtime = 0;
++
++ p->numa_group = NULL;
++#endif /* CONFIG_NUMA_BALANCING */
++}
++
++#ifdef CONFIG_NUMA_BALANCING
++#ifdef CONFIG_SCHED_DEBUG
++void set_numabalancing_state(bool enabled)
++{
++ if (enabled)
++ sched_feat_set("NUMA");
++ else
++ sched_feat_set("NO_NUMA");
++}
++#else
++__read_mostly bool numabalancing_enabled;
++
++void set_numabalancing_state(bool enabled)
++{
++ numabalancing_enabled = enabled;
++}
++#endif /* CONFIG_SCHED_DEBUG */
++
++#ifdef CONFIG_PROC_SYSCTL
++int sysctl_numa_balancing(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ struct ctl_table t;
++ int err;
++ int state = numabalancing_enabled;
++
++ if (write && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ t = *table;
++ t.data = &state;
++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++ if (err < 0)
++ return err;
++ if (write)
++ set_numabalancing_state(state);
++ return err;
++}
++#endif
++#endif
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++ unsigned long flags;
++ int cpu = get_cpu();
++
++ __sched_fork(clone_flags, p);
++ /*
++ * We mark the process as running here. This guarantees that
++ * nobody will actually run it, and a signal or other external
++ * event cannot wake it up and insert it on the runqueue either.
++ */
++ p->state = TASK_RUNNING;
++
++ /*
++ * Make sure we do not leak PI boosting priority to the child.
++ */
++ p->prio = current->normal_prio;
++
++ /*
++ * Revert to default priority/policy on fork if requested.
++ */
++ if (unlikely(p->sched_reset_on_fork)) {
++ if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
++ p->policy = SCHED_NORMAL;
++ p->static_prio = NICE_TO_PRIO(0);
++ p->rt_priority = 0;
++ } else if (PRIO_TO_NICE(p->static_prio) < 0)
++ p->static_prio = NICE_TO_PRIO(0);
++
++ p->prio = p->normal_prio = __normal_prio(p);
++ set_load_weight(p);
++
++ /*
++ * We don't need the reset flag anymore after the fork. It has
++ * fulfilled its duty:
++ */
++ p->sched_reset_on_fork = 0;
++ }
++
++ if (dl_prio(p->prio)) {
++ put_cpu();
++ return -EAGAIN;
++ } else if (rt_prio(p->prio)) {
++ p->sched_class = &rt_sched_class;
++ } else {
++ p->sched_class = &fair_sched_class;
++ }
++
++ if (p->sched_class->task_fork)
++ p->sched_class->task_fork(p);
++
++ /*
++ * The child is not yet in the pid-hash so no cgroup attach races,
++	 * and the cgroup is pinned to this child because cgroup_fork()
++	 * is run before sched_fork().
++ *
++ * Silence PROVE_RCU.
++ */
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ set_task_cpu(p, cpu);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
++ if (likely(sched_info_on()))
++ memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++#if defined(CONFIG_SMP)
++ p->on_cpu = 0;
++#endif
++ init_task_preempt_count(p);
++#ifdef CONFIG_SMP
++ plist_node_init(&p->pushable_tasks, MAX_PRIO);
++ RB_CLEAR_NODE(&p->pushable_dl_tasks);
++#endif
++
++ put_cpu();
++ return 0;
++}
++
++unsigned long to_ratio(u64 period, u64 runtime)
++{
++ if (runtime == RUNTIME_INF)
++ return 1ULL << 20;
++
++ /*
++ * Doing this here saves a lot of checks in all
++ * the calling paths, and returning zero seems
++ * safe for them anyway.
++ */
++ if (period == 0)
++ return 0;
++
++ return div64_u64(runtime << 20, period);
++}
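++
++/*
++ * Worked example (editor's addition, not part of the original patch):
++ * the returned value is runtime/period in 20-bit fixed point, so
++ * 1ULL << 20 stands for 100% of one CPU. For runtime = 10ms and
++ * period = 100ms:
++ *
++ *	to_ratio(100000000, 10000000) = (10000000 << 20) / 100000000
++ *				      = 104857, about 10% of (1 << 20)
++ *
++ * which is the ~10% bandwidth such a -deadline task would reserve.
++ */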
++
++#ifdef CONFIG_SMP
++inline struct dl_bw *dl_bw_of(int i)
++{
++ rcu_lockdep_assert(rcu_read_lock_sched_held(),
++ "sched RCU must be held");
++ return &cpu_rq(i)->rd->dl_bw;
++}
++
++static inline int dl_bw_cpus(int i)
++{
++ struct root_domain *rd = cpu_rq(i)->rd;
++ int cpus = 0;
++
++ rcu_lockdep_assert(rcu_read_lock_sched_held(),
++ "sched RCU must be held");
++ for_each_cpu_and(i, rd->span, cpu_active_mask)
++ cpus++;
++
++ return cpus;
++}
++#else
++inline struct dl_bw *dl_bw_of(int i)
++{
++ return &cpu_rq(i)->dl.dl_bw;
++}
++
++static inline int dl_bw_cpus(int i)
++{
++ return 1;
++}
++#endif
++
++/*
++ * We must be sure that accepting a new task (or allowing changing the
++ * parameters of an existing one) is consistent with the bandwidth
++ * constraints. If yes, this function also accordingly updates the currently
++ * allocated bandwidth to reflect the new situation.
++ *
++ * This function is called while holding p's rq->lock.
++ *
++ * XXX we should delay bw change until the task's 0-lag point, see
++ * __setparam_dl().
++ */
++static int dl_overflow(struct task_struct *p, int policy,
++ const struct sched_attr *attr)
++{
++
++ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
++ u64 period = attr->sched_period ?: attr->sched_deadline;
++ u64 runtime = attr->sched_runtime;
++ u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
++ int cpus, err = -1;
++
++ if (new_bw == p->dl.dl_bw)
++ return 0;
++
++ /*
++	 * Whether a task enters, leaves, or stays -deadline but changes
++	 * its parameters, we may need to update the total allocated
++	 * bandwidth of the container accordingly.
++ */
++ raw_spin_lock(&dl_b->lock);
++ cpus = dl_bw_cpus(task_cpu(p));
++ if (dl_policy(policy) && !task_has_dl_policy(p) &&
++ !__dl_overflow(dl_b, cpus, 0, new_bw)) {
++ __dl_add(dl_b, new_bw);
++ err = 0;
++ } else if (dl_policy(policy) && task_has_dl_policy(p) &&
++ !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
++ __dl_clear(dl_b, p->dl.dl_bw);
++ __dl_add(dl_b, new_bw);
++ err = 0;
++ } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
++ __dl_clear(dl_b, p->dl.dl_bw);
++ err = 0;
++ }
++ raw_spin_unlock(&dl_b->lock);
++
++ return err;
++}
++
++extern void init_dl_bw(struct dl_bw *dl_b);
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++#ifdef CONFIG_SMP
++ /*
++ * Fork balancing, do it here and not earlier because:
++ * - cpus_allowed can change in the fork path
++ * - any previously selected cpu might disappear through hotplug
++ */
++ set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
++#endif
++
++ /* Initialize new task's runnable average */
++ init_task_runnable_average(p);
++ rq = __task_rq_lock(p);
++ activate_task(rq, p, 0);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++ trace_sched_wakeup_new(p, true);
++ check_preempt_curr(rq, p, WF_FORK);
++#ifdef CONFIG_SMP
++ if (p->sched_class->task_woken)
++ p->sched_class->task_woken(rq, p);
++#endif
++ task_rq_unlock(rq, p, &flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++ hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++ hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
++
++static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++ struct preempt_notifier *notifier;
++
++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++ notifier->ops->sched_out(notifier, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++ struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @prev: the current task that is being switched out
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ trace_sched_switch(prev, next);
++ sched_info_switch(rq, prev, next);
++ perf_event_task_sched_out(prev, next);
++ fire_sched_out_preempt_notifiers(prev, next);
++ prepare_lock_switch(rq, next);
++ prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq = this_rq();
++ struct mm_struct *mm = rq->prev_mm;
++ long prev_state;
++
++ rq->prev_mm = NULL;
++
++ /*
++ * A task struct has one reference for the use as "current".
++ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++ * schedule one last time. The schedule call will never return, and
++ * the scheduled task must drop that reference.
++ * The test for TASK_DEAD must occur while the runqueue locks are
++ * still held, otherwise prev could be scheduled on another cpu, die
++ * there before we look at prev->state, and then the reference would
++ * be dropped twice.
++ * Manfred Spraul <manfred@colorfullife.com>
++ */
++ prev_state = prev->state;
++ vtime_task_switch(prev);
++ finish_arch_switch(prev);
++ perf_event_task_sched_in(prev, current);
++ finish_lock_switch(rq, prev);
++ finish_arch_post_lock_switch();
++
++ fire_sched_in_preempt_notifiers(current);
++ if (mm)
++ mmdrop(mm);
++ if (unlikely(prev_state == TASK_DEAD)) {
++ if (prev->sched_class->task_dead)
++ prev->sched_class->task_dead(prev);
++
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(prev);
++ put_task_struct(prev);
++ }
++
++ tick_nohz_task_switch(current);
++ return rq;
++}
++
++#ifdef CONFIG_SMP
++
++/* rq->lock is NOT held, but preemption is disabled */
++static inline void post_schedule(struct rq *rq)
++{
++ if (rq->post_schedule) {
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ if (rq->curr->sched_class->post_schedule)
++ rq->curr->sched_class->post_schedule(rq);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ rq->post_schedule = 0;
++ }
++}
++
++#else
++
++static inline void post_schedule(struct rq *rq)
++{
++}
++
++#endif
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++ __releases(rq->lock)
++{
++ struct rq *rq;
++
++	/* finish_task_switch() drops rq->lock and enables preemption */
++ preempt_disable();
++ rq = finish_task_switch(prev);
++ post_schedule(rq);
++ preempt_enable();
++
++ if (current->set_child_tid)
++ put_user(task_pid_vnr(current), current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++ struct task_struct *next)
++{
++ struct mm_struct *mm, *oldmm;
++
++ prepare_task_switch(rq, prev, next);
++
++ mm = next->mm;
++ oldmm = prev->active_mm;
++ /*
++ * For paravirt, this is coupled with an exit in switch_to to
++ * combine the page table reload and the switch backend into
++ * one hypercall.
++ */
++ arch_start_context_switch(prev);
++
++ if (!mm) {
++ next->active_mm = oldmm;
++ atomic_inc(&oldmm->mm_count);
++ enter_lazy_tlb(oldmm, next);
++ } else
++ switch_mm(oldmm, mm, next);
++
++ if (!prev->mm) {
++ prev->active_mm = NULL;
++ rq->prev_mm = oldmm;
++ }
++ /*
++	 * The runqueue lock will be released by the next
++ * task (which is an invalid locking op but in the case
++ * of the scheduler it's an obvious special-case), so we
++ * do an early lockdep release here:
++ */
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++
++ context_tracking_task_switch(prev, next);
++ /* Here we just switch the register state and the stack. */
++ switch_to(prev, next, prev);
++ barrier();
++
++ return finish_task_switch(prev);
++}
++
++/*
++ * nr_running and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned long nr_running(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_online_cpu(i)
++ sum += cpu_rq(i)->nr_running;
++
++ return sum;
++}
++
++/*
++ * Check if only the current task is running on the cpu.
++ */
++bool single_task_running(void)
++{
++ if (cpu_rq(smp_processor_id())->nr_running == 1)
++ return true;
++ else
++ return false;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++ int i;
++ unsigned long long sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += cpu_rq(i)->nr_switches;
++
++ return sum;
++}
++
++unsigned long nr_iowait(void)
++{
++ unsigned long i, sum = 0;
++
++ for_each_possible_cpu(i)
++ sum += atomic_read(&cpu_rq(i)->nr_iowait);
++
++ return sum;
++}
++
++unsigned long nr_iowait_cpu(int cpu)
++{
++ struct rq *this = cpu_rq(cpu);
++ return atomic_read(&this->nr_iowait);
++}
++
++void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
++{
++ struct rq *this = this_rq();
++ *nr_waiters = atomic_read(&this->nr_iowait);
++ *load = this->cpu_load[0];
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache footprint.
++ */
++void sched_exec(void)
++{
++ struct task_struct *p = current;
++ unsigned long flags;
++ int dest_cpu;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
++ if (dest_cpu == smp_processor_id())
++ goto unlock;
++
++ if (likely(cpu_active(dest_cpu))) {
++ struct migration_arg arg = { p, dest_cpu };
++
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++ return;
++ }
++unlock:
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++/*
++ * Return accounted runtime for the task.
++ * In case the task is currently running, return the runtime plus current's
++ * pending runtime that has not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++ unsigned long flags;
++ struct rq *rq;
++ u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++ /*
++ * 64-bit doesn't need locks to atomically read a 64bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++ * Reading ->on_cpu is racy, but this is ok.
++ *
++ * If we race with it leaving cpu, we'll take a lock. So we're correct.
++ * If we race with it entering cpu, unaccounted time is 0. This is
++ * indistinguishable from the read occurring a few cycles earlier.
++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++ * been accounted, so we're correct here as well.
++ */
++ if (!p->on_cpu || !task_on_rq_queued(p))
++ return p->se.sum_exec_runtime;
++#endif
++
++ rq = task_rq_lock(p, &flags);
++ /*
++ * Must be ->curr _and_ ->on_rq. If dequeued, we would
++ * project cycles that may never be accounted to this
++ * thread, breaking clock_gettime().
++ */
++ if (task_current(rq, p) && task_on_rq_queued(p)) {
++ update_rq_clock(rq);
++ p->sched_class->update_curr(rq);
++ }
++ ns = p->se.sum_exec_runtime;
++ task_rq_unlock(rq, p, &flags);
++
++ return ns;
++}
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++ int cpu = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++ struct task_struct *curr = rq->curr;
++
++ sched_clock_tick();
++
++ raw_spin_lock(&rq->lock);
++ update_rq_clock(rq);
++ curr->sched_class->task_tick(rq, curr, 0);
++ update_cpu_load_active(rq);
++ raw_spin_unlock(&rq->lock);
++
++ perf_event_task_tick();
++
++#ifdef CONFIG_SMP
++ rq->idle_balance = idle_cpu(cpu);
++ trigger_load_balance(rq);
++#endif
++ rq_last_tick_reset(rq);
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++/**
++ * scheduler_tick_max_deferment
++ *
++ * Keep at least one tick per second when a single
++ * active task is running because the scheduler doesn't
++ * yet completely support full dynticks environment.
++ *
++ * This makes sure that uptime, CFS vruntime, load
++ * balancing, etc... continue to move forward, even
++ * with a very low granularity.
++ *
++ * Return: Maximum deferment in nanoseconds.
++ */
++u64 scheduler_tick_max_deferment(void)
++{
++ struct rq *rq = this_rq();
++ unsigned long next, now = ACCESS_ONCE(jiffies);
++
++ next = rq->last_sched_tick + HZ;
++
++ if (time_before_eq(next, now))
++ return 0;
++
++ return jiffies_to_nsecs(next - now);
++}
++#endif
++
++notrace unsigned long get_parent_ip(unsigned long addr)
++{
++ if (in_lock_functions(addr)) {
++ addr = CALLER_ADDR2;
++ if (in_lock_functions(addr))
++ addr = CALLER_ADDR3;
++ }
++ return addr;
++}
++
++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
++ defined(CONFIG_PREEMPT_TRACER))
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++ return;
++#endif
++ __preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Spinlock count overflowing soon?
++ */
++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++ PREEMPT_MASK - 10);
++#endif
++ if (preempt_count() == val) {
++ unsigned long ip = get_parent_ip(CALLER_ADDR1);
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++ /*
++ * Underflow?
++ */
++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++ return;
++ /*
++ * Is the spinlock portion underflowing?
++ */
++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++ !(preempt_count() & PREEMPT_MASK)))
++ return;
++#endif
++
++ if (preempt_count() == val)
++ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++ __preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#endif
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++ if (oops_in_progress)
++ return;
++
++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++ prev->comm, prev->pid, preempt_count());
++
++ debug_show_held_locks(prev);
++ print_modules();
++ if (irqs_disabled())
++ print_irqtrace_events(prev);
++#ifdef CONFIG_DEBUG_PREEMPT
++ if (in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(current->preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++ BUG_ON(unlikely(task_stack_end_corrupted(prev)));
++#endif
++ /*
++ * Test if we are atomic. Since do_exit() needs to call into
++ * schedule() atomically, we ignore that path. Otherwise whine
++ * if we are scheduling when we should not.
++ */
++ if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
++ __schedule_bug(prev);
++ rcu_sleep_check();
++
++ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++ schedstat_inc(this_rq(), sched_count);
++}
++
++/*
++ * Pick up the highest-prio task:
++ */
++static inline struct task_struct *
++pick_next_task(struct rq *rq, struct task_struct *prev)
++{
++ const struct sched_class *class = &fair_sched_class;
++ struct task_struct *p;
++
++ /*
++ * Optimization: we know that if all tasks are in
++ * the fair class we can call that function directly:
++ */
++ if (likely(prev->sched_class == class &&
++ rq->nr_running == rq->cfs.h_nr_running)) {
++ p = fair_sched_class.pick_next_task(rq, prev);
++ if (unlikely(p == RETRY_TASK))
++ goto again;
++
++ /* assumes fair_sched_class->next == idle_sched_class */
++ if (unlikely(!p))
++ p = idle_sched_class.pick_next_task(rq, prev);
++
++ return p;
++ }
++
++again:
++ for_each_class(class) {
++ p = class->pick_next_task(rq, prev);
++ if (p) {
++ if (unlikely(p == RETRY_TASK))
++ goto again;
++ return p;
++ }
++ }
++
++ BUG(); /* the idle class will always have a runnable task */
++}
++
++/*
++ * __schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ * paths. For example, see arch/x86/entry_64.S.
++ *
++ * To drive preemption between tasks, the scheduler sets the flag in timer
++ * interrupt handler scheduler_tick().
++ *
++ * 3. Wakeups don't really cause entry into schedule(). They add a
++ * task to the run-queue and that's it.
++ *
++ * Now, if the new task added to the run-queue preempts the current
++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ * called on the nearest possible occasion:
++ *
++ * - If the kernel is preemptible (CONFIG_PREEMPT=y):
++ *
++ * - in syscall or exception context, at the next outermost
++ * preempt_enable(). (this might be as soon as the wake_up()'s
++ * spin_unlock()!)
++ *
++ * - in IRQ context, return from interrupt-handler to
++ * preemptible context
++ *
++ * - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
++ * then at the next:
++ *
++ * - cond_resched() call
++ * - explicit schedule() call
++ * - return from syscall or exception to user-space
++ * - return from interrupt-handler to user-space
++ *
++ * WARNING: all callers must re-check need_resched() afterward and reschedule
++ * accordingly in case an event triggered the need for rescheduling (such as
++ * an interrupt waking up a task) while preemption was disabled in __schedule().
++ */
++static void __sched __schedule(void)
++{
++ struct task_struct *prev, *next;
++ unsigned long *switch_count;
++ struct rq *rq;
++ int cpu;
++
++ preempt_disable();
++ cpu = smp_processor_id();
++ rq = cpu_rq(cpu);
++ rcu_note_context_switch();
++ prev = rq->curr;
++
++ schedule_debug(prev);
++
++ if (sched_feat(HRTICK))
++ hrtick_clear(rq);
++
++ /*
++ * Make sure that signal_pending_state()->signal_pending() below
++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++ * done by the caller to avoid the race with signal_wake_up().
++ */
++ smp_mb__before_spinlock();
++ raw_spin_lock_irq(&rq->lock);
++
++ rq->clock_skip_update <<= 1; /* promote REQ to ACT */
++
++ switch_count = &prev->nivcsw;
++ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
++ if (unlikely(signal_pending_state(prev->state, prev))) {
++ prev->state = TASK_RUNNING;
++ } else {
++ deactivate_task(rq, prev, DEQUEUE_SLEEP);
++ prev->on_rq = 0;
++
++ /*
++ * If a worker went to sleep, notify and ask workqueue
++ * whether it wants to wake up a task to maintain
++ * concurrency.
++ */
++ if (prev->flags & PF_WQ_WORKER) {
++ struct task_struct *to_wakeup;
++
++ to_wakeup = wq_worker_sleeping(prev, cpu);
++ if (to_wakeup)
++ try_to_wake_up_local(to_wakeup);
++ }
++ }
++ switch_count = &prev->nvcsw;
++ }
++
++ if (task_on_rq_queued(prev))
++ update_rq_clock(rq);
++
++ next = pick_next_task(rq, prev);
++ clear_tsk_need_resched(prev);
++ clear_preempt_need_resched();
++ rq->clock_skip_update = 0;
++
++ if (likely(prev != next)) {
++ rq->nr_switches++;
++ rq->curr = next;
++ ++*switch_count;
++
++ rq = context_switch(rq, prev, next); /* unlocks the rq */
++ cpu = cpu_of(rq);
++ } else
++ raw_spin_unlock_irq(&rq->lock);
++
++ post_schedule(rq);
++
++ sched_preempt_enable_no_resched();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++ if (!tsk->state || tsk_is_pi_blocked(tsk))
++ return;
++ /*
++ * If we are going to sleep and we have plugged IO queued,
++ * make sure to submit it to avoid deadlocks.
++ */
++ if (blk_needs_flush_plug(tsk))
++ blk_schedule_flush_plug(tsk);
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++ struct task_struct *tsk = current;
++
++ sched_submit_work(tsk);
++ do {
++ __schedule();
++ } while (need_resched());
++}
++EXPORT_SYMBOL(schedule);
++
++#ifdef CONFIG_CONTEXT_TRACKING
++asmlinkage __visible void __sched schedule_user(void)
++{
++ /*
++ * If we come here after a random call to set_need_resched(),
++ * or we have been woken up remotely but the IPI has not yet arrived,
++ * we haven't yet exited the RCU idle mode. Do it here manually until
++ * we find a better solution.
++ *
++ * NB: There are buggy callers of this function. Ideally we
++ * should warn if prev_state != CONTEXT_USER, but that will trigger
++ * too frequently to make sense yet.
++ */
++ enum ctx_state prev_state = exception_enter();
++ schedule();
++ exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++ sched_preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++}
++
++static void __sched notrace preempt_schedule_common(void)
++{
++ do {
++ __preempt_count_add(PREEMPT_ACTIVE);
++ __schedule();
++ __preempt_count_sub(PREEMPT_ACTIVE);
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPT
++/*
++ * this is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable. Kernel preemptions off return from interrupt
++ * occur there and call schedule directly.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++ /*
++ * If there is a non-zero preempt_count or interrupts are disabled,
++ * we do not want to preempt the current task. Just return..
++ */
++ if (likely(!preemptible()))
++ return;
++
++ preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_CONTEXT_TRACKING
++/**
++ * preempt_schedule_context - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_context(void)
++{
++ enum ctx_state prev_ctx;
++
++ if (likely(!preemptible()))
++ return;
++
++ do {
++ __preempt_count_add(PREEMPT_ACTIVE);
++ /*
++ * Needs preempt disabled in case user_exit() is traced
++ * and the tracer calls preempt_enable_notrace() causing
++ * an infinite recursion.
++ */
++ prev_ctx = exception_enter();
++ __schedule();
++ exception_exit(prev_ctx);
++
++ __preempt_count_sub(PREEMPT_ACTIVE);
++ barrier();
++ } while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_context);
++#endif /* CONFIG_CONTEXT_TRACKING */
++
++#endif /* CONFIG_PREEMPT */
++
++/*
++ * this is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This will
++ * protect us against recursive calling from irq.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++ enum ctx_state prev_state;
++
++ /* Catch callers which need to be fixed */
++ BUG_ON(preempt_count() || !irqs_disabled());
++
++ prev_state = exception_enter();
++
++ do {
++ __preempt_count_add(PREEMPT_ACTIVE);
++ local_irq_enable();
++ __schedule();
++ local_irq_disable();
++ __preempt_count_sub(PREEMPT_ACTIVE);
++
++ /*
++ * Check again in case we missed a preemption opportunity
++ * between schedule and now.
++ */
++ barrier();
++ } while (need_resched());
++
++ exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
++ void *key)
++{
++ return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++#ifdef CONFIG_RT_MUTEXES
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task
++ * @prio: prio value (kernel-internal form)
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, int prio)
++{
++ int oldprio, queued, running, enqueue_flag = 0;
++ struct rq *rq;
++ const struct sched_class *prev_class;
++
++ BUG_ON(prio > MAX_PRIO);
++
++ rq = __task_rq_lock(p);
++
++ /*
++ * Idle task boosting is a no-no in general. There is one
++ * exception, when PREEMPT_RT and NOHZ is active:
++ *
++ * The idle task calls get_next_timer_interrupt() and holds
++ * the timer wheel base->lock on the CPU and another CPU wants
++ * to access the timer (probably to cancel it). We can safely
++ * ignore the boosting request, as the idle CPU runs this code
++ * with interrupts disabled and will complete the lock
++ * protected section without being interrupted. So there is no
++ * real need to boost.
++ */
++ if (unlikely(p == rq->idle)) {
++ WARN_ON(p != rq->curr);
++ WARN_ON(p->pi_blocked_on);
++ goto out_unlock;
++ }
++
++ trace_sched_pi_setprio(p, prio);
++ oldprio = p->prio;
++ prev_class = p->sched_class;
++ queued = task_on_rq_queued(p);
++ running = task_current(rq, p);
++ if (queued)
++ dequeue_task(rq, p, 0);
++ if (running)
++ put_prev_task(rq, p);
++
++ /*
++ * Boosting conditions are:
++ * 1. -rt task is running and holds mutex A
++ * --> -dl task blocks on mutex A
++ *
++ * 2. -dl task is running and holds mutex A
++ * --> -dl task blocks on mutex A and could preempt the
++ * running task
++ */
++ if (dl_prio(prio)) {
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++ if (!dl_prio(p->normal_prio) ||
++ (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
++ p->dl.dl_boosted = 1;
++ p->dl.dl_throttled = 0;
++ enqueue_flag = ENQUEUE_REPLENISH;
++ } else
++ p->dl.dl_boosted = 0;
++ p->sched_class = &dl_sched_class;
++ } else if (rt_prio(prio)) {
++ if (dl_prio(oldprio))
++ p->dl.dl_boosted = 0;
++ if (oldprio < prio)
++ enqueue_flag = ENQUEUE_HEAD;
++ p->sched_class = &rt_sched_class;
++ } else {
++ if (dl_prio(oldprio))
++ p->dl.dl_boosted = 0;
++ if (rt_prio(oldprio))
++ p->rt.timeout = 0;
++ p->sched_class = &fair_sched_class;
++ }
++
++ p->prio = prio;
++
++ if (running)
++ p->sched_class->set_curr_task(rq);
++ if (queued)
++ enqueue_task(rq, p, enqueue_flag);
++
++ check_class_changed(rq, p, prev_class, oldprio);
++out_unlock:
++ __task_rq_unlock(rq);
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++ int old_prio, delta, queued;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++ return;
++ /*
++ * We have to be careful: if called from sys_setpriority(),
++ * the task might be in the middle of scheduling on another CPU.
++ */
++ rq = task_rq_lock(p, &flags);
++ /*
++ * The RT priorities are set via sched_setscheduler(), but we still
++ * allow the 'normal' nice value to be set - but as expected
++ * it won't have any effect on scheduling as long as the task's policy
++ * is SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
++ */
++ if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
++ p->static_prio = NICE_TO_PRIO(nice);
++ goto out_unlock;
++ }
++ queued = task_on_rq_queued(p);
++ if (queued)
++ dequeue_task(rq, p, 0);
++
++ p->static_prio = NICE_TO_PRIO(nice);
++ set_load_weight(p);
++ old_prio = p->prio;
++ p->prio = effective_prio(p);
++ delta = p->prio - old_prio;
++
++ if (queued) {
++ enqueue_task(rq, p, 0);
++ /*
++ * If the task increased its priority or is running and
++ * lowered its priority, then reschedule its CPU:
++ */
++ if (delta < 0 || (delta > 0 && task_running(rq, p)))
++ resched_curr(rq);
++ }
++out_unlock:
++ task_rq_unlock(rq, p, &flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++ /* convert nice value [19,-20] to rlimit style value [1,40] */
++ int nice_rlim = nice_to_rlimit(nice);
++
++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++ capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++ long nice, retval;
++
++ /*
++ * Setpriority might change our priority at the same moment.
++ * We don't have to worry. Conceptually one call occurs first
++ * and we have a single winner.
++ */
++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++ nice = task_nice(current) + increment;
++
++ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++ if (increment < 0 && !can_nice(current, nice))
++ return -EPERM;
++
++ retval = security_task_setnice(current, nice);
++ if (retval)
++ return retval;
++
++ set_user_nice(current, nice);
++ return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -200. Normal tasks are centered
++ * around 0, value goes from -16 to +15.
++ */
++int task_prio(const struct task_struct *p)
++{
++ return p->prio - MAX_RT_PRIO;
++}
++
++/**
++ * idle_cpu - is a given cpu idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++
++ if (rq->curr != rq->idle)
++ return 0;
++
++ if (rq->nr_running)
++ return 0;
++
++#ifdef CONFIG_SMP
++ if (!llist_empty(&rq->wake_list))
++ return 0;
++#endif
++
++ return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given cpu.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++ return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static struct task_struct *find_process_by_pid(pid_t pid)
++{
++ return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * This function initializes the sched_dl_entity of a newly becoming
++ * SCHED_DEADLINE task.
++ *
++ * Only the static values are considered here, the actual runtime and the
++ * absolute deadline will be properly calculated when the task is enqueued
++ * for the first time with its new policy.
++ */
++static void
++__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
++{
++ struct sched_dl_entity *dl_se = &p->dl;
++
++ dl_se->dl_runtime = attr->sched_runtime;
++ dl_se->dl_deadline = attr->sched_deadline;
++ dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
++ dl_se->flags = attr->sched_flags;
++ dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
++
++ /*
++ * Changing the parameters of a task is 'tricky' and we're not doing
++ * the correct thing -- also see task_dead_dl() and switched_from_dl().
++ *
++ * What we SHOULD do is delay the bandwidth release until the 0-lag
++ * point. This would include retaining the task_struct until that time
++ * and change dl_overflow() to not immediately decrement the current
++ * amount.
++ *
++ * Instead we retain the current runtime/deadline and let the new
++ * parameters take effect after the current reservation period lapses.
++ * This is safe (albeit pessimistic) because the 0-lag point is always
++ * before the current scheduling deadline.
++ *
++ * We can still have temporary overloads because we do not delay the
++ * change in bandwidth until that time; so admission control is
++ * not on the safe side. It does however guarantee tasks will never
++ * consume more than promised.
++ */
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++ const struct sched_attr *attr)
++{
++ int policy = attr->sched_policy;
++
++ if (policy == SETPARAM_POLICY)
++ policy = p->policy;
++
++ p->policy = policy;
++
++ if (dl_policy(policy))
++ __setparam_dl(p, attr);
++ else if (fair_policy(policy))
++ p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++ /*
++ * __sched_setscheduler() ensures attr->sched_priority == 0 when
++ * !rt_policy. Always setting this ensures that things like
++ * getparam()/getattr() don't report silly values for !rt tasks.
++ */
++ p->rt_priority = attr->sched_priority;
++ p->normal_prio = normal_prio(p);
++ set_load_weight(p);
++}
++
++/* Actually do priority change: must hold pi & rq lock. */
++static void __setscheduler(struct rq *rq, struct task_struct *p,
++ const struct sched_attr *attr, bool keep_boost)
++{
++ __setscheduler_params(p, attr);
++
++ /*
++ * Keep a potential priority boosting if called from
++ * sched_setscheduler().
++ */
++ if (keep_boost)
++ p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
++ else
++ p->prio = normal_prio(p);
++
++ if (dl_prio(p->prio))
++ p->sched_class = &dl_sched_class;
++ else if (rt_prio(p->prio))
++ p->sched_class = &rt_sched_class;
++ else
++ p->sched_class = &fair_sched_class;
++}
++
++static void
++__getparam_dl(struct task_struct *p, struct sched_attr *attr)
++{
++ struct sched_dl_entity *dl_se = &p->dl;
++
++ attr->sched_priority = p->rt_priority;
++ attr->sched_runtime = dl_se->dl_runtime;
++ attr->sched_deadline = dl_se->dl_deadline;
++ attr->sched_period = dl_se->dl_period;
++ attr->sched_flags = dl_se->flags;
++}
++
++/*
++ * This function validates the new parameters of a -deadline task.
++ * We require the deadline to be non-zero and greater than or equal
++ * to the runtime, and the period to be either zero or greater than
++ * or equal to the deadline. Furthermore, we have to be sure that
++ * user parameters are above the internal resolution of 1us (we
++ * check sched_runtime only since it is always the smaller one) and
++ * below 2^63 ns (we have to check both sched_deadline and
++ * sched_period, as the latter can be zero).
++ */
++static bool
++__checkparam_dl(const struct sched_attr *attr)
++{
++ /* deadline != 0 */
++ if (attr->sched_deadline == 0)
++ return false;
++
++ /*
++ * Since we truncate DL_SCALE bits, make sure we're at least
++ * that big.
++ */
++ if (attr->sched_runtime < (1ULL << DL_SCALE))
++ return false;
++
++ /*
++ * Since we use the MSB for wrap-around and sign issues, make
++ * sure it's not set (mind that period can be equal to zero).
++ */
++ if (attr->sched_deadline & (1ULL << 63) ||
++ attr->sched_period & (1ULL << 63))
++ return false;
++
++ /* runtime <= deadline <= period (if period != 0) */
++ if ((attr->sched_period != 0 &&
++ attr->sched_period < attr->sched_deadline) ||
++ attr->sched_deadline < attr->sched_runtime)
++ return false;
++
++ return true;
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++ const struct cred *cred = current_cred(), *pcred;
++ bool match;
++
++ rcu_read_lock();
++ pcred = __task_cred(p);
++ match = (uid_eq(cred->euid, pcred->euid) ||
++ uid_eq(cred->euid, pcred->uid));
++ rcu_read_unlock();
++ return match;
++}
++
++static bool dl_param_changed(struct task_struct *p,
++ const struct sched_attr *attr)
++{
++ struct sched_dl_entity *dl_se = &p->dl;
++
++ if (dl_se->dl_runtime != attr->sched_runtime ||
++ dl_se->dl_deadline != attr->sched_deadline ||
++ dl_se->dl_period != attr->sched_period ||
++ dl_se->flags != attr->sched_flags)
++ return true;
++
++ return false;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++ const struct sched_attr *attr,
++ bool user)
++{
++ int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
++ MAX_RT_PRIO - 1 - attr->sched_priority;
++ int retval, oldprio, oldpolicy = -1, queued, running;
++ int new_effective_prio, policy = attr->sched_policy;
++ unsigned long flags;
++ const struct sched_class *prev_class;
++ struct rq *rq;
++ int reset_on_fork;
++
++ /* may grab non-irq protected spin_locks */
++ BUG_ON(in_interrupt());
++recheck:
++ /* double check policy once rq lock held */
++ if (policy < 0) {
++ reset_on_fork = p->sched_reset_on_fork;
++ policy = oldpolicy = p->policy;
++ } else {
++ reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
++
++ if (policy != SCHED_DEADLINE &&
++ policy != SCHED_FIFO && policy != SCHED_RR &&
++ policy != SCHED_NORMAL && policy != SCHED_BATCH &&
++ policy != SCHED_IDLE)
++ return -EINVAL;
++ }
++
++ if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
++ return -EINVAL;
++
++ /*
++ * Valid priorities for SCHED_FIFO and SCHED_RR are
++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
++ * SCHED_BATCH and SCHED_IDLE is 0.
++ */
++ if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
++ (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
++ return -EINVAL;
++ if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
++ (rt_policy(policy) != (attr->sched_priority != 0)))
++ return -EINVAL;
++
++ /*
++ * Allow unprivileged RT tasks to decrease priority:
++ */
++ if (user && !capable(CAP_SYS_NICE)) {
++ if (fair_policy(policy)) {
++ if (attr->sched_nice < task_nice(p) &&
++ !can_nice(p, attr->sched_nice))
++ return -EPERM;
++ }
++
++ if (rt_policy(policy)) {
++ unsigned long rlim_rtprio =
++ task_rlimit(p, RLIMIT_RTPRIO);
++
++ /* can't set/change the rt policy */
++ if (policy != p->policy && !rlim_rtprio)
++ return -EPERM;
++
++ /* can't increase priority */
++ if (attr->sched_priority > p->rt_priority &&
++ attr->sched_priority > rlim_rtprio)
++ return -EPERM;
++ }
++
++ /*
++ * Can't set/change SCHED_DEADLINE policy at all for now
++ * (safest behavior); in the future we would like to allow
++ * unprivileged DL tasks to increase their relative deadline
++ * or reduce their runtime (both ways reducing utilization)
++ */
++ if (dl_policy(policy))
++ return -EPERM;
++
++ /*
++ * Treat SCHED_IDLE as nice 20. Only allow a switch to
++ * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
++ */
++ if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
++ if (!can_nice(p, task_nice(p)))
++ return -EPERM;
++ }
++
++ /* can't change other user's priorities */
++ if (!check_same_owner(p))
++ return -EPERM;
++
++ /* Normal users shall not reset the sched_reset_on_fork flag */
++ if (p->sched_reset_on_fork && !reset_on_fork)
++ return -EPERM;
++ }
++
++ if (user) {
++ retval = security_task_setscheduler(p);
++ if (retval)
++ return retval;
++ }
++
++ /*
++ * make sure no PI-waiters arrive (or leave) while we are
++ * changing the priority of the task:
++ *
++ * To be able to change p->policy safely, the appropriate
++ * runqueue lock must be held.
++ */
++ rq = task_rq_lock(p, &flags);
++
++ /*
++ * Changing the policy of the stop threads is a very bad idea
++ */
++ if (p == rq->stop) {
++ task_rq_unlock(rq, p, &flags);
++ return -EINVAL;
++ }
++
++ /*
++ * If not changing anything there's no need to proceed further,
++ * but store a possible modification of reset_on_fork.
++ */
++ if (unlikely(policy == p->policy)) {
++ if (fair_policy(policy) && attr->sched_nice != task_nice(p))
++ goto change;
++ if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++ goto change;
++ if (dl_policy(policy) && dl_param_changed(p, attr))
++ goto change;
++
++ p->sched_reset_on_fork = reset_on_fork;
++ task_rq_unlock(rq, p, &flags);
++ return 0;
++ }
++change:
++
++ if (user) {
++#ifdef CONFIG_RT_GROUP_SCHED
++ /*
++ * Do not allow realtime tasks into groups that have no runtime
++ * assigned.
++ */
++ if (rt_bandwidth_enabled() && rt_policy(policy) &&
++ task_group(p)->rt_bandwidth.rt_runtime == 0 &&
++ !task_group_is_autogroup(task_group(p))) {
++ task_rq_unlock(rq, p, &flags);
++ return -EPERM;
++ }
++#endif
++#ifdef CONFIG_SMP
++ if (dl_bandwidth_enabled() && dl_policy(policy)) {
++ cpumask_t *span = rq->rd->span;
++
++ /*
++ * Don't allow tasks with an affinity mask smaller than
++ * the entire root_domain to become SCHED_DEADLINE. We
++ * will also fail if there's no bandwidth available.
++ */
++ if (!cpumask_subset(span, &p->cpus_allowed) ||
++ rq->rd->dl_bw.bw == 0) {
++ task_rq_unlock(rq, p, &flags);
++ return -EPERM;
++ }
++ }
++#endif
++ }
++
++ /* recheck policy now with rq lock held */
++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++ policy = oldpolicy = -1;
++ task_rq_unlock(rq, p, &flags);
++ goto recheck;
++ }
++
++ /*
++ * If setscheduling to SCHED_DEADLINE (or changing the parameters
++ * of a SCHED_DEADLINE task) we need to check if enough bandwidth
++ * is available.
++ */
++ if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
++ task_rq_unlock(rq, p, &flags);
++ return -EBUSY;
++ }
++
++ p->sched_reset_on_fork = reset_on_fork;
++ oldprio = p->prio;
++
++ /*
++ * Take priority boosted tasks into account. If the new
++ * effective priority is unchanged, we just store the new
++ * normal parameters and do not touch the scheduler class and
++ * the runqueue. This will be done when the task deboosts
++ * itself.
++ */
++ new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
++ if (new_effective_prio == oldprio) {
++ __setscheduler_params(p, attr);
++ task_rq_unlock(rq, p, &flags);
++ return 0;
++ }
++
++ queued = task_on_rq_queued(p);
++ running = task_current(rq, p);
++ if (queued)
++ dequeue_task(rq, p, 0);
++ if (running)
++ put_prev_task(rq, p);
++
++ prev_class = p->sched_class;
++ __setscheduler(rq, p, attr, true);
++
++ if (running)
++ p->sched_class->set_curr_task(rq);
++ if (queued) {
++ /*
++ * We enqueue to tail when the priority of a task is
++ * increased (user space view).
++ */
++ enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
++ }
++
++ check_class_changed(rq, p, prev_class, oldprio);
++ task_rq_unlock(rq, p, &flags);
++
++ rt_mutex_adjust_pi(p);
++
++ return 0;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param, bool check)
++{
++ struct sched_attr attr = {
++ .sched_policy = policy,
++ .sched_priority = param->sched_priority,
++ .sched_nice = PRIO_TO_NICE(p->static_prio),
++ };
++
++ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++ policy &= ~SCHED_RESET_ON_FORK;
++ attr.sched_policy = policy;
++ }
++
++ return __sched_setscheduler(p, &attr, check);
++}
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, true);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++ return __sched_setscheduler(p, attr, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++ const struct sched_param *param)
++{
++ return _sched_setscheduler(p, policy, param, false);
++}
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++ struct sched_param lparam;
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++ return -EFAULT;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setscheduler(p, policy, &lparam);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /*
++ * zero the full structure, so that a short copy will be nice.
++ */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ if (size > PAGE_SIZE) /* silly large */
++ goto err_size;
++
++ if (!size) /* abi compat */
++ size = SCHED_ATTR_SIZE_VER0;
++
++ if (size < SCHED_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ /*
++ * XXX: do we want to be lenient like existing syscalls; or do we want
++ * to be strict and return an error on out-of-bounds values?
++ */
++ attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
++
++ return 0;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
++ struct sched_param __user *, param)
++{
++ /* negative values for policy are not valid */
++ if (policy < 0)
++ return -EINVAL;
++
++ return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, flags)
++{
++ struct sched_attr attr;
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || flags)
++ return -EINVAL;
++
++ retval = sched_copy_attr(uattr, &attr);
++ if (retval)
++ return retval;
++
++ if ((int)attr.sched_policy < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (p != NULL)
++ retval = sched_setattr(p, &attr);
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++ struct task_struct *p;
++ int retval;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (p) {
++ retval = security_task_getscheduler(p);
++ if (!retval)
++ retval = p->policy
++ | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
++ }
++ rcu_read_unlock();
++ return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++ struct sched_param lp = { .sched_priority = 0 };
++ struct task_struct *p;
++ int retval;
++
++ if (!param || pid < 0)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ if (task_has_rt_policy(p))
++ lp.sched_priority = p->rt_priority;
++ rcu_read_unlock();
++
++ /*
++ * This one might sleep, we cannot do it with a spinlock held ...
++ */
++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++static int sched_read_attr(struct sched_attr __user *uattr,
++ struct sched_attr *attr,
++ unsigned int usize)
++{
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, usize))
++ return -EFAULT;
++
++ /*
++ * If we're handed a smaller struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. old
++ * user-space does not get incomplete information.
++ */
++ if (usize < sizeof(*attr)) {
++ unsigned char *addr;
++ unsigned char *end;
++
++ addr = (void *)attr + usize;
++ end = (void *)attr + sizeof(*attr);
++
++ for (; addr < end; addr++) {
++ if (*addr)
++ return -EFBIG;
++ }
++
++ attr->size = usize;
++ }
++
++ ret = copy_to_user(uattr, attr, attr->size);
++ if (ret)
++ return -EFAULT;
++
++ return 0;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++ unsigned int, size, unsigned int, flags)
++{
++ struct sched_attr attr = {
++ .size = sizeof(struct sched_attr),
++ };
++ struct task_struct *p;
++ int retval;
++
++ if (!uattr || pid < 0 || size > PAGE_SIZE ||
++ size < SCHED_ATTR_SIZE_VER0 || flags)
++ return -EINVAL;
++
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ retval = -ESRCH;
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ attr.sched_policy = p->policy;
++ if (p->sched_reset_on_fork)
++ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++ if (task_has_dl_policy(p))
++ __getparam_dl(p, &attr);
++ else if (task_has_rt_policy(p))
++ attr.sched_priority = p->rt_priority;
++ else
++ attr.sched_nice = task_nice(p);
++
++ rcu_read_unlock();
++
++ retval = sched_read_attr(uattr, &attr, size);
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++ cpumask_var_t cpus_allowed, new_mask;
++ struct task_struct *p;
++ int retval;
++
++ rcu_read_lock();
++
++ p = find_process_by_pid(pid);
++ if (!p) {
++ rcu_read_unlock();
++ return -ESRCH;
++ }
++
++ /* Prevent p going away */
++ get_task_struct(p);
++ rcu_read_unlock();
++
++ if (p->flags & PF_NO_SETAFFINITY) {
++ retval = -EINVAL;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_put_task;
++ }
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++ retval = -ENOMEM;
++ goto out_free_cpus_allowed;
++ }
++ retval = -EPERM;
++ if (!check_same_owner(p)) {
++ rcu_read_lock();
++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++ rcu_read_unlock();
++ goto out_free_new_mask;
++ }
++ rcu_read_unlock();
++ }
++
++ retval = security_task_setscheduler(p);
++ if (retval)
++ goto out_free_new_mask;
++
++
++ cpuset_cpus_allowed(p, cpus_allowed);
++ cpumask_and(new_mask, in_mask, cpus_allowed);
++
++ /*
++ * Since bandwidth control happens on root_domain basis,
++ * if admission test is enabled, we only admit -deadline
++ * tasks allowed to run on all the CPUs in the task's
++ * root_domain.
++ */
++#ifdef CONFIG_SMP
++ if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
++ rcu_read_lock();
++ if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
++ retval = -EBUSY;
++ rcu_read_unlock();
++ goto out_free_new_mask;
++ }
++ rcu_read_unlock();
++ }
++#endif
++again:
++ retval = set_cpus_allowed_ptr(p, new_mask);
++
++ if (!retval) {
++ cpuset_cpus_allowed(p, cpus_allowed);
++ if (!cpumask_subset(new_mask, cpus_allowed)) {
++ /*
++ * We must have raced with a concurrent cpuset
++ * update. Just reset the cpus_allowed to the
++ * cpuset's cpus_allowed
++ */
++ cpumask_copy(new_mask, cpus_allowed);
++ goto again;
++ }
++ }
++out_free_new_mask:
++ free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++ free_cpumask_var(cpus_allowed);
++out_put_task:
++ put_task_struct(p);
++ return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++ struct cpumask *new_mask)
++{
++ if (len < cpumask_size())
++ cpumask_clear(new_mask);
++ else if (len > cpumask_size())
++ len = cpumask_size();
++
++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the cpu affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new cpu mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ cpumask_var_t new_mask;
++ int retval;
++
++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++ if (retval == 0)
++ retval = sched_setaffinity(pid, new_mask);
++ free_cpumask_var(new_mask);
++ return retval;
++}
++
++long sched_getaffinity(pid_t pid, struct cpumask *mask)
++{
++ struct task_struct *p;
++ unsigned long flags;
++ int retval;
++
++ rcu_read_lock();
++
++ retval = -ESRCH;
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ raw_spin_lock_irqsave(&p->pi_lock, flags);
++ cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++out_unlock:
++ rcu_read_unlock();
++
++ return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the cpu affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current cpu mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++ unsigned long __user *, user_mask_ptr)
++{
++ int ret;
++ cpumask_var_t mask;
++
++ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++ return -EINVAL;
++ if (len & (sizeof(unsigned long)-1))
++ return -EINVAL;
++
++ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++ return -ENOMEM;
++
++ ret = sched_getaffinity(pid, mask);
++ if (ret == 0) {
++ size_t retlen = min_t(size_t, len, cpumask_size());
++
++ if (copy_to_user(user_mask_ptr, mask, retlen))
++ ret = -EFAULT;
++ else
++ ret = retlen;
++ }
++ free_cpumask_var(mask);
++
++ return ret;
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++ struct rq *rq = this_rq_lock();
++
++ schedstat_inc(rq, yld_count);
++ current->sched_class->yield_task(rq);
++
++ /*
++ * Since we are going to call schedule() anyway, there's
++ * no need to preempt or enable interrupts:
++ */
++ __release(rq->lock);
++ spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
++ do_raw_spin_unlock(&rq->lock);
++ sched_preempt_enable_no_resched();
++
++ schedule();
++
++ return 0;
++}
++
++int __sched _cond_resched(void)
++{
++ if (should_resched()) {
++ preempt_schedule_common();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++ int resched = should_resched();
++ int ret = 0;
++
++ lockdep_assert_held(lock);
++
++ if (spin_needbreak(lock) || resched) {
++ spin_unlock(lock);
++ if (resched)
++ preempt_schedule_common();
++ else
++ cpu_relax();
++ ret = 1;
++ spin_lock(lock);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __sched __cond_resched_softirq(void)
++{
++ BUG_ON(!in_softirq());
++
++ if (should_resched()) {
++ local_bh_enable();
++ preempt_schedule_common();
++ local_bh_disable();
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(__cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++ set_current_state(TASK_RUNNING);
++ sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * Return:
++ * true (>0) if we indeed boosted the target task.
++ * false (0) if we failed to boost the target.
++ * -ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++ struct task_struct *curr = current;
++ struct rq *rq, *p_rq;
++ unsigned long flags;
++ int yielded = 0;
++
++ local_irq_save(flags);
++ rq = this_rq();
++
++again:
++ p_rq = task_rq(p);
++ /*
++ * If we're the only runnable task on the rq and target rq also
++ * has only one task, there's absolutely no point in yielding.
++ */
++ if (rq->nr_running == 1 && p_rq->nr_running == 1) {
++ yielded = -ESRCH;
++ goto out_irq;
++ }
++
++ double_rq_lock(rq, p_rq);
++ if (task_rq(p) != p_rq) {
++ double_rq_unlock(rq, p_rq);
++ goto again;
++ }
++
++ if (!curr->sched_class->yield_to_task)
++ goto out_unlock;
++
++ if (curr->sched_class != p->sched_class)
++ goto out_unlock;
++
++ if (task_running(p_rq, p) || p->state)
++ goto out_unlock;
++
++ yielded = curr->sched_class->yield_to_task(rq, p, preempt);
++ if (yielded) {
++ schedstat_inc(rq, yld_count);
++ /*
++ * Make p's CPU reschedule; pick_next_entity takes care of
++ * fairness.
++ */
++ if (preempt && rq != p_rq)
++ resched_curr(p_rq);
++ }
++
++out_unlock:
++ double_rq_unlock(rq, p_rq);
++out_irq:
++ local_irq_restore(flags);
++
++ if (yielded > 0)
++ schedule();
++
++ return yielded;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++/*
++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ */
++long __sched io_schedule_timeout(long timeout)
++{
++ int old_iowait = current->in_iowait;
++ struct rq *rq;
++ long ret;
++
++ current->in_iowait = 1;
++ blk_schedule_flush_plug(current);
++
++ delayacct_blkio_start();
++ rq = raw_rq();
++ atomic_inc(&rq->nr_iowait);
++ ret = schedule_timeout(timeout);
++ current->in_iowait = old_iowait;
++ atomic_dec(&rq->nr_iowait);
++ delayacct_blkio_end();
++
++ return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = MAX_USER_RT_PRIO-1;
++ break;
++ case SCHED_DEADLINE:
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_IDLE:
++ ret = 0;
++ break;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++ int ret = -EINVAL;
++
++ switch (policy) {
++ case SCHED_FIFO:
++ case SCHED_RR:
++ ret = 1;
++ break;
++ case SCHED_DEADLINE:
++ case SCHED_NORMAL:
++ case SCHED_BATCH:
++ case SCHED_IDLE:
++ ret = 0;
++ }
++ return ret;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * this syscall writes the default timeslice value of a given process
++ * into the user-space timespec buffer. A value of '0' means infinity.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++ struct timespec __user *, interval)
++{
++ struct task_struct *p;
++ unsigned int time_slice;
++ unsigned long flags;
++ struct rq *rq;
++ int retval;
++ struct timespec t;
++
++ if (pid < 0)
++ return -EINVAL;
++
++ retval = -ESRCH;
++ rcu_read_lock();
++ p = find_process_by_pid(pid);
++ if (!p)
++ goto out_unlock;
++
++ retval = security_task_getscheduler(p);
++ if (retval)
++ goto out_unlock;
++
++ rq = task_rq_lock(p, &flags);
++ time_slice = 0;
++ if (p->sched_class->get_rr_interval)
++ time_slice = p->sched_class->get_rr_interval(rq, p);
++ task_rq_unlock(rq, p, &flags);
++
++ rcu_read_unlock();
++ jiffies_to_timespec(time_slice, &t);
++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
++ return retval;
++
++out_unlock:
++ rcu_read_unlock();
++ return retval;
++}
++
++static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
++
++void sched_show_task(struct task_struct *p)
++{
++ unsigned long free = 0;
++ int ppid;
++ unsigned long state = p->state;
++
++ if (state)
++ state = __ffs(state) + 1;
++ printk(KERN_INFO "%-15.15s %c", p->comm,
++ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
++#if BITS_PER_LONG == 32
++ if (state == TASK_RUNNING)
++ printk(KERN_CONT " running ");
++ else
++ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
++#else
++ if (state == TASK_RUNNING)
++ printk(KERN_CONT " running task ");
++ else
++ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
++#endif
++#ifdef CONFIG_DEBUG_STACK_USAGE
++ free = stack_not_used(p);
++#endif
++ ppid = 0;
++ rcu_read_lock();
++ if (pid_alive(p))
++ ppid = task_pid_nr(rcu_dereference(p->real_parent));
++ rcu_read_unlock();
++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
++ task_pid_nr(p), ppid,
++ (unsigned long)task_thread_info(p)->flags);
++
++ print_worker_info(KERN_INFO, p);
++ show_stack(p, NULL);
++}
++
++void show_state_filter(unsigned long state_filter)
++{
++ struct task_struct *g, *p;
++
++#if BITS_PER_LONG == 32
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#else
++ printk(KERN_INFO
++ " task PC stack pid father\n");
++#endif
++ rcu_read_lock();
++ for_each_process_thread(g, p) {
++ /*
++ * reset the NMI-timeout, listing all files on a slow
++ * console might take a lot of time:
++ */
++ touch_nmi_watchdog();
++ if (!state_filter || (p->state & state_filter))
++ sched_show_task(p);
++ }
++
++ touch_all_softlockup_watchdogs();
++
++#ifdef CONFIG_SCHED_DEBUG
++ sysrq_sched_debug_show();
++#endif
++ rcu_read_unlock();
++ /*
++ * Only show locks if all tasks are dumped:
++ */
++ if (!state_filter)
++ debug_show_all_locks();
++}
++
++void init_idle_bootup_task(struct task_struct *idle)
++{
++ idle->sched_class = &idle_sched_class;
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: cpu the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void init_idle(struct task_struct *idle, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&rq->lock, flags);
++
++ __sched_fork(0, idle);
++ idle->state = TASK_RUNNING;
++ idle->se.exec_start = sched_clock();
++
++ do_set_cpus_allowed(idle, cpumask_of(cpu));
++ /*
++ * We're having a chicken-and-egg problem: even though we are
++ * holding rq->lock, the cpu isn't yet set to this cpu so the
++ * lockdep check in task_group() will fail.
++ *
++ * Similar case to sched_fork(). / Alternatively we could
++ * use task_rq_lock() here and obtain the other rq->lock.
++ *
++ * Silence PROVE_RCU
++ */
++ rcu_read_lock();
++ __set_task_cpu(idle, cpu);
++ rcu_read_unlock();
++
++ rq->curr = rq->idle = idle;
++ idle->on_rq = TASK_ON_RQ_QUEUED;
++#if defined(CONFIG_SMP)
++ idle->on_cpu = 1;
++#endif
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ /* Set the preempt count _outside_ the spinlocks! */
++ init_idle_preempt_count(idle, cpu);
++
++ /*
++ * The idle tasks have their own, simple scheduling class:
++ */
++ idle->sched_class = &idle_sched_class;
++ ftrace_graph_init_idle_task(idle, cpu);
++ vtime_init_idle(idle, cpu);
++#if defined(CONFIG_SMP)
++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++int cpuset_cpumask_can_shrink(const struct cpumask *cur,
++ const struct cpumask *trial)
++{
++ int ret = 1, trial_cpus;
++ struct dl_bw *cur_dl_b;
++ unsigned long flags;
++
++ if (!cpumask_weight(cur))
++ return ret;
++
++ rcu_read_lock_sched();
++ cur_dl_b = dl_bw_of(cpumask_any(cur));
++ trial_cpus = cpumask_weight(trial);
++
++ raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
++ if (cur_dl_b->bw != -1 &&
++ cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
++ ret = 0;
++ raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
++ rcu_read_unlock_sched();
++
++ return ret;
++}
++
++int task_can_attach(struct task_struct *p,
++ const struct cpumask *cs_cpus_allowed)
++{
++ int ret = 0;
++
++ /*
++ * Kthreads which disallow setaffinity shouldn't be moved
++ * to a new cpuset; we don't want to change their cpu
++ * affinity and isolating such threads by their set of
++ * allowed nodes is unnecessary. Thus, cpusets are not
++ * applicable for such threads. This prevents checking for
++ * success of set_cpus_allowed_ptr() on all attached tasks
++ * before cpus_allowed may be changed.
++ */
++ if (p->flags & PF_NO_SETAFFINITY) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++#ifdef CONFIG_SMP
++ if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
++ cs_cpus_allowed)) {
++ unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
++ cs_cpus_allowed);
++ struct dl_bw *dl_b;
++ bool overflow;
++ int cpus;
++ unsigned long flags;
++
++ rcu_read_lock_sched();
++ dl_b = dl_bw_of(dest_cpu);
++ raw_spin_lock_irqsave(&dl_b->lock, flags);
++ cpus = dl_bw_cpus(dest_cpu);
++ overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
++ if (overflow)
++ ret = -EBUSY;
++ else {
++ /*
++ * We reserve space for this task in the destination
++ * root_domain, as we can't fail after this point.
++ * We will free resources in the source root_domain
++ * later on (see set_cpus_allowed_dl()).
++ */
++ __dl_add(dl_b, p->dl.dl_bw);
++ }
++ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
++ rcu_read_unlock_sched();
++
++ }
++#endif
++out:
++ return ret;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * move_queued_task - move a queued task to new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
++{
++ struct rq *rq = task_rq(p);
++
++ lockdep_assert_held(&rq->lock);
++
++ dequeue_task(rq, p, 0);
++ p->on_rq = TASK_ON_RQ_MIGRATING;
++ set_task_cpu(p, new_cpu);
++ raw_spin_unlock(&rq->lock);
++
++ rq = cpu_rq(new_cpu);
++
++ raw_spin_lock(&rq->lock);
++ BUG_ON(task_cpu(p) != new_cpu);
++ p->on_rq = TASK_ON_RQ_QUEUED;
++ enqueue_task(rq, p, 0);
++ check_preempt_curr(rq, p, 0);
++
++ return rq;
++}
++
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, new_mask);
++
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ * stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ * off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ * it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ * is done.
++ */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++ unsigned long flags;
++ struct rq *rq;
++ unsigned int dest_cpu;
++ int ret = 0;
++
++ rq = task_rq_lock(p, &flags);
++
++ if (cpumask_equal(&p->cpus_allowed, new_mask))
++ goto out;
++
++ if (!cpumask_intersects(new_mask, cpu_active_mask)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ do_set_cpus_allowed(p, new_mask);
++
++ /* Can the task run on the task's current CPU? If so, we're done */
++ if (cpumask_test_cpu(task_cpu(p), new_mask))
++ goto out;
++
++ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
++ if (task_running(rq, p) || p->state == TASK_WAKING) {
++ struct migration_arg arg = { p, dest_cpu };
++ /* Need help from migration thread: drop lock and wait. */
++ task_rq_unlock(rq, p, &flags);
++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ return 0;
++ } else if (task_on_rq_queued(p))
++ rq = move_queued_task(p, dest_cpu);
++out:
++ task_rq_unlock(rq, p, &flags);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++/*
++ * Move (not current) task off this cpu, onto dest cpu. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ *
++ * Returns non-zero if task was successfully migrated.
++ */
++static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
++{
++ struct rq *rq;
++ int ret = 0;
++
++ if (unlikely(!cpu_active(dest_cpu)))
++ return ret;
++
++ rq = cpu_rq(src_cpu);
++
++ raw_spin_lock(&p->pi_lock);
++ raw_spin_lock(&rq->lock);
++ /* Already moved. */
++ if (task_cpu(p) != src_cpu)
++ goto done;
++
++ /* Affinity changed (again). */
++ if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
++ goto fail;
++
++ /*
++ * If we're not on a rq, the next wake-up will ensure we're
++ * placed properly.
++ */
++ if (task_on_rq_queued(p))
++ rq = move_queued_task(p, dest_cpu);
++done:
++ ret = 1;
++fail:
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock(&p->pi_lock);
++ return ret;
++}
++
++#ifdef CONFIG_NUMA_BALANCING
++/* Migrate current task p to target_cpu */
++int migrate_task_to(struct task_struct *p, int target_cpu)
++{
++ struct migration_arg arg = { p, target_cpu };
++ int curr_cpu = task_cpu(p);
++
++ if (curr_cpu == target_cpu)
++ return 0;
++
++ if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
++ return -EINVAL;
++
++ /* TODO: This is not properly updating schedstats */
++
++ trace_sched_move_numa(p, curr_cpu, target_cpu);
++ return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
++}
++
++/*
++ * Requeue a task on a given node and accurately track the number of NUMA
++ * tasks on the runqueues
++ */
++void sched_setnuma(struct task_struct *p, int nid)
++{
++ struct rq *rq;
++ unsigned long flags;
++ bool queued, running;
++
++ rq = task_rq_lock(p, &flags);
++ queued = task_on_rq_queued(p);
++ running = task_current(rq, p);
++
++ if (queued)
++ dequeue_task(rq, p, 0);
++ if (running)
++ put_prev_task(rq, p);
++
++ p->numa_preferred_nid = nid;
++
++ if (running)
++ p->sched_class->set_curr_task(rq);
++ if (queued)
++ enqueue_task(rq, p, 0);
++ task_rq_unlock(rq, p, &flags);
++}
++#endif
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++ struct migration_arg *arg = data;
++
++ /*
++ * The original target cpu might have gone down and we might
++ * be on another cpu but it doesn't matter.
++ */
++ local_irq_disable();
++ /*
++ * We need to explicitly wake pending tasks before running
++ * __migrate_task() such that we will not miss enforcing cpus_allowed
++ * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++ */
++ sched_ttwu_pending();
++ __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
++ local_irq_enable();
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Ensures that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++ struct mm_struct *mm = current->active_mm;
++
++ BUG_ON(cpu_online(smp_processor_id()));
++
++ if (mm != &init_mm) {
++ switch_mm(mm, &init_mm, current);
++ finish_arch_post_lock_switch();
++ }
++ mmdrop(mm);
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta
++ * we might have. Assumes we're called after migrate_tasks() so that the
++ * nr_active count is stable.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++ long delta = calc_load_fold_active(rq);
++ if (delta)
++ atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
++{
++}
++
++static const struct sched_class fake_sched_class = {
++ .put_prev_task = put_prev_task_fake,
++};
++
++static struct task_struct fake_task = {
++ /*
++ * Avoid pull_{rt,dl}_task()
++ */
++ .prio = MAX_PRIO + 1,
++ .sched_class = &fake_sched_class,
++};
++
++/*
++ * Migrate all tasks from the rq, sleeping tasks will be migrated by
++ * try_to_wake_up()->select_task_rq().
++ *
++ * Called with rq->lock held even though we're in stop_machine() and
++ * there's no concurrency possible, we hold the required locks anyway
++ * because of lock validation efforts.
++ */
++static void migrate_tasks(unsigned int dead_cpu)
++{
++ struct rq *rq = cpu_rq(dead_cpu);
++ struct task_struct *next, *stop = rq->stop;
++ int dest_cpu;
++
++ /*
++ * Fudge the rq selection such that the below task selection loop
++ * doesn't get stuck on the currently eligible stop task.
++ *
++ * We're currently inside stop_machine() and the rq is either stuck
++ * in the stop_machine_cpu_stop() loop, or we're executing this code,
++ * either way we should never end up calling schedule() until we're
++ * done here.
++ */
++ rq->stop = NULL;
++
++ /*
++ * put_prev_task() and pick_next_task() sched
++ * class method both need to have an up-to-date
++ * value of rq->clock[_task]
++ */
++ update_rq_clock(rq);
++
++ for ( ; ; ) {
++ /*
++ * There's this thread running, bail when that's the only
++ * remaining thread.
++ */
++ if (rq->nr_running == 1)
++ break;
++
++ next = pick_next_task(rq, &fake_task);
++ BUG_ON(!next);
++ next->sched_class->put_prev_task(rq, next);
++
++ /* Find suitable destination for @next, with force if needed. */
++ dest_cpu = select_fallback_rq(dead_cpu, next);
++ raw_spin_unlock(&rq->lock);
++
++ __migrate_task(next, dead_cpu, dest_cpu);
++
++ raw_spin_lock(&rq->lock);
++ }
++
++ rq->stop = stop;
++}
++
++#endif /* CONFIG_HOTPLUG_CPU */
++
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++
++static struct ctl_table sd_ctl_dir[] = {
++ {
++ .procname = "sched_domain",
++ .mode = 0555,
++ },
++ {}
++};
++
++static struct ctl_table sd_ctl_root[] = {
++ {
++ .procname = "kernel",
++ .mode = 0555,
++ .child = sd_ctl_dir,
++ },
++ {}
++};
++
++static struct ctl_table *sd_alloc_ctl_entry(int n)
++{
++ struct ctl_table *entry =
++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
++
++ return entry;
++}
++
++static void sd_free_ctl_entry(struct ctl_table **tablep)
++{
++ struct ctl_table *entry;
++
++ /*
++ * In the intermediate directories, both the child directory and
++ * procname are dynamically allocated and could fail but the mode
++ * will always be set. In the lowest directory the names are
++ * static strings and all have proc handlers.
++ */
++ for (entry = *tablep; entry->mode; entry++) {
++ if (entry->child)
++ sd_free_ctl_entry(&entry->child);
++ if (entry->proc_handler == NULL)
++ kfree(entry->procname);
++ }
++
++ kfree(*tablep);
++ *tablep = NULL;
++}
++
++static int min_load_idx = 0;
++static int max_load_idx = CPU_LOAD_IDX_MAX-1;
++
++static void
++set_table_entry(struct ctl_table *entry,
++ const char *procname, void *data, int maxlen,
++ umode_t mode, proc_handler *proc_handler,
++ bool load_idx)
++{
++ entry->procname = procname;
++ entry->data = data;
++ entry->maxlen = maxlen;
++ entry->mode = mode;
++ entry->proc_handler = proc_handler;
++
++ if (load_idx) {
++ entry->extra1 = &min_load_idx;
++ entry->extra2 = &max_load_idx;
++ }
++}
++
++static struct ctl_table *
++sd_alloc_ctl_domain_table(struct sched_domain *sd)
++{
++ struct ctl_table *table = sd_alloc_ctl_entry(14);
++
++ if (table == NULL)
++ return NULL;
++
++ set_table_entry(&table[0], "min_interval", &sd->min_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[1], "max_interval", &sd->max_interval,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
++ sizeof(int), 0644, proc_dointvec_minmax, true);
++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[9], "cache_nice_tries",
++ &sd->cache_nice_tries,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[10], "flags", &sd->flags,
++ sizeof(int), 0644, proc_dointvec_minmax, false);
++ set_table_entry(&table[11], "max_newidle_lb_cost",
++ &sd->max_newidle_lb_cost,
++ sizeof(long), 0644, proc_doulongvec_minmax, false);
++ set_table_entry(&table[12], "name", sd->name,
++ CORENAME_MAX_SIZE, 0444, proc_dostring, false);
++ /* &table[13] is terminator */
++
++ return table;
++}
++
++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
++{
++ struct ctl_table *entry, *table;
++ struct sched_domain *sd;
++ int domain_num = 0, i;
++ char buf[32];
++
++ for_each_domain(cpu, sd)
++ domain_num++;
++ entry = table = sd_alloc_ctl_entry(domain_num + 1);
++ if (table == NULL)
++ return NULL;
++
++ i = 0;
++ for_each_domain(cpu, sd) {
++ snprintf(buf, 32, "domain%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_domain_table(sd);
++ entry++;
++ i++;
++ }
++ return table;
++}
++
++static struct ctl_table_header *sd_sysctl_header;
++static void register_sched_domain_sysctl(void)
++{
++ int i, cpu_num = num_possible_cpus();
++ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
++ char buf[32];
++
++ WARN_ON(sd_ctl_dir[0].child);
++ sd_ctl_dir[0].child = entry;
++
++ if (entry == NULL)
++ return;
++
++ for_each_possible_cpu(i) {
++ snprintf(buf, 32, "cpu%d", i);
++ entry->procname = kstrdup(buf, GFP_KERNEL);
++ entry->mode = 0555;
++ entry->child = sd_alloc_ctl_cpu_table(i);
++ entry++;
++ }
++
++ WARN_ON(sd_sysctl_header);
++ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
++}
++
++/* may be called multiple times per register */
++static void unregister_sched_domain_sysctl(void)
++{
++ if (sd_sysctl_header)
++ unregister_sysctl_table(sd_sysctl_header);
++ sd_sysctl_header = NULL;
++ if (sd_ctl_dir[0].child)
++ sd_free_ctl_entry(&sd_ctl_dir[0].child);
++}
++#else
++static void register_sched_domain_sysctl(void)
++{
++}
++static void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
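The sysctl tables above follow one pattern throughout: a zero-terminated array of entries whose names are duplicated on the heap and later freed by walking until the terminator. A small user-space sketch of that pattern (the struct and field names are hypothetical, not the real ctl_table):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char *procname;
	int mode;
};

int main(void)
{
	int n = 3, i;
	struct entry *tbl = calloc(n + 1, sizeof(*tbl));	/* +1 for the zeroed terminator */
	char buf[32];

	if (!tbl)
		return 1;

	for (i = 0; i < n; i++) {
		snprintf(buf, sizeof(buf), "cpu%d", i);
		tbl[i].procname = strdup(buf);
		tbl[i].mode = 0555;
	}

	for (i = 0; tbl[i].mode; i++) {	/* walk until the terminator, as sd_free_ctl_entry() does */
		printf("%s\n", tbl[i].procname);
		free(tbl[i].procname);
	}
	free(tbl);
	return 0;
}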
++
++static void set_rq_online(struct rq *rq)
++{
++ if (!rq->online) {
++ const struct sched_class *class;
++
++ cpumask_set_cpu(rq->cpu, rq->rd->online);
++ rq->online = 1;
++
++ for_each_class(class) {
++ if (class->rq_online)
++ class->rq_online(rq);
++ }
++ }
++}
++
++static void set_rq_offline(struct rq *rq)
++{
++ if (rq->online) {
++ const struct sched_class *class;
++
++ for_each_class(class) {
++ if (class->rq_offline)
++ class->rq_offline(rq);
++ }
++
++ cpumask_clear_cpu(rq->cpu, rq->rd->online);
++ rq->online = 0;
++ }
++}
++
++/*
++ * migration_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int
++migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ unsigned long flags;
++ struct rq *rq = cpu_rq(cpu);
++
++ switch (action & ~CPU_TASKS_FROZEN) {
++
++ case CPU_UP_PREPARE:
++ rq->calc_load_update = calc_load_update;
++ break;
++
++ case CPU_ONLINE:
++ /* Update our root-domain */
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++
++ set_rq_online(rq);
++ }
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ break;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_DYING:
++ sched_ttwu_pending();
++ /* Update our root-domain */
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ if (rq->rd) {
++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
++ set_rq_offline(rq);
++ }
++ migrate_tasks(cpu);
++ BUG_ON(rq->nr_running != 1); /* the migration thread */
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ break;
++
++ case CPU_DEAD:
++ calc_load_migrate(rq);
++ break;
++#endif
++ }
++
++ update_max_interval();
++
++ return NOTIFY_OK;
++}
++
++/*
++ * Register at high priority so that task migration (migrate_all_tasks)
++ * happens before everything else. This has to be lower priority than
++ * the notifier in the perf_event subsystem, though.
++ */
++static struct notifier_block migration_notifier = {
++ .notifier_call = migration_call,
++ .priority = CPU_PRI_MIGRATION,
++};
++
++static void __cpuinit set_cpu_rq_start_time(void)
++{
++ int cpu = smp_processor_id();
++ struct rq *rq = cpu_rq(cpu);
++ rq->age_stamp = sched_clock_cpu(cpu);
++}
++
++static int sched_cpu_active(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_STARTING:
++ set_cpu_rq_start_time();
++ return NOTIFY_OK;
++ case CPU_ONLINE:
++ /*
++ * At this point a starting CPU has marked itself as online via
++ * set_cpu_online(). But it might not yet have marked itself
++ * as active, which is essential from here on.
++ *
++ * Thus, fall-through and help the starting CPU along.
++ */
++ case CPU_DOWN_FAILED:
++ set_cpu_active((long)hcpu, true);
++ return NOTIFY_OK;
++ default:
++ return NOTIFY_DONE;
++ }
++}
++
++static int sched_cpu_inactive(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_DOWN_PREPARE:
++ set_cpu_active((long)hcpu, false);
++ return NOTIFY_OK;
++ default:
++ return NOTIFY_DONE;
++ }
++}
++
++static int __init migration_init(void)
++{
++ void *cpu = (void *)(long)smp_processor_id();
++ int err;
++
++ /* Initialize migration for the boot CPU */
++ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
++ BUG_ON(err == NOTIFY_BAD);
++ migration_call(&migration_notifier, CPU_ONLINE, cpu);
++ register_cpu_notifier(&migration_notifier);
++
++ /* Register cpu active notifiers */
++ cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
++ cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
++
++ return 0;
++}
++early_initcall(migration_init);
++#endif
++
++#ifdef CONFIG_SMP
++
++static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
++
++#ifdef CONFIG_SCHED_DEBUG
++
++static __read_mostly int sched_debug_enabled;
++
++static int __init sched_debug_setup(char *str)
++{
++ sched_debug_enabled = 1;
++
++ return 0;
++}
++early_param("sched_debug", sched_debug_setup);
++
++static inline bool sched_debug(void)
++{
++ return sched_debug_enabled;
++}
++
++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
++ struct cpumask *groupmask)
++{
++ struct sched_group *group = sd->groups;
++
++ cpumask_clear(groupmask);
++
++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
++
++ if (!(sd->flags & SD_LOAD_BALANCE)) {
++ printk("does not load-balance\n");
++ if (sd->parent)
++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
++ " has parent");
++ return -1;
++ }
++
++ printk(KERN_CONT "span %*pbl level %s\n",
++ cpumask_pr_args(sched_domain_span(sd)), sd->name);
++
++ if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
++ printk(KERN_ERR "ERROR: domain->span does not contain "
++ "CPU%d\n", cpu);
++ }
++ if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
++ printk(KERN_ERR "ERROR: domain->groups does not contain"
++ " CPU%d\n", cpu);
++ }
++
++ printk(KERN_DEBUG "%*s groups:", level + 1, "");
++ do {
++ if (!group) {
++ printk("\n");
++ printk(KERN_ERR "ERROR: group is NULL\n");
++ break;
++ }
++
++ if (!cpumask_weight(sched_group_cpus(group))) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: empty group\n");
++ break;
++ }
++
++ if (!(sd->flags & SD_OVERLAP) &&
++ cpumask_intersects(groupmask, sched_group_cpus(group))) {
++ printk(KERN_CONT "\n");
++ printk(KERN_ERR "ERROR: repeated CPUs\n");
++ break;
++ }
++
++ cpumask_or(groupmask, groupmask, sched_group_cpus(group));
++
++ printk(KERN_CONT " %*pbl",
++ cpumask_pr_args(sched_group_cpus(group)));
++ if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
++ printk(KERN_CONT " (cpu_capacity = %d)",
++ group->sgc->capacity);
++ }
++
++ group = group->next;
++ } while (group != sd->groups);
++ printk(KERN_CONT "\n");
++
++ if (!cpumask_equal(sched_domain_span(sd), groupmask))
++ printk(KERN_ERR "ERROR: groups don't span domain->span\n");
++
++ if (sd->parent &&
++ !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
++ printk(KERN_ERR "ERROR: parent span is not a superset "
++ "of domain->span\n");
++ return 0;
++}
++
++static void sched_domain_debug(struct sched_domain *sd, int cpu)
++{
++ int level = 0;
++
++ if (!sched_debug_enabled)
++ return;
++
++ if (!sd) {
++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
++ return;
++ }
++
++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
++
++ for (;;) {
++ if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
++ break;
++ level++;
++ sd = sd->parent;
++ if (!sd)
++ break;
++ }
++}
++#else /* !CONFIG_SCHED_DEBUG */
++# define sched_domain_debug(sd, cpu) do { } while (0)
++static inline bool sched_debug(void)
++{
++ return false;
++}
++#endif /* CONFIG_SCHED_DEBUG */
++
++static int sd_degenerate(struct sched_domain *sd)
++{
++ if (cpumask_weight(sched_domain_span(sd)) == 1)
++ return 1;
++
++ /* Following flags need at least 2 groups */
++ if (sd->flags & (SD_LOAD_BALANCE |
++ SD_BALANCE_NEWIDLE |
++ SD_BALANCE_FORK |
++ SD_BALANCE_EXEC |
++ SD_SHARE_CPUCAPACITY |
++ SD_SHARE_PKG_RESOURCES |
++ SD_SHARE_POWERDOMAIN)) {
++ if (sd->groups != sd->groups->next)
++ return 0;
++ }
++
++ /* Following flags don't use groups */
++ if (sd->flags & (SD_WAKE_AFFINE))
++ return 0;
++
++ return 1;
++}
++
++static int
++sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
++{
++ unsigned long cflags = sd->flags, pflags = parent->flags;
++
++ if (sd_degenerate(parent))
++ return 1;
++
++ if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
++ return 0;
++
++ /* Flags needing groups don't count if only 1 group in parent */
++ if (parent->groups == parent->groups->next) {
++ pflags &= ~(SD_LOAD_BALANCE |
++ SD_BALANCE_NEWIDLE |
++ SD_BALANCE_FORK |
++ SD_BALANCE_EXEC |
++ SD_SHARE_CPUCAPACITY |
++ SD_SHARE_PKG_RESOURCES |
++ SD_PREFER_SIBLING |
++ SD_SHARE_POWERDOMAIN);
++ if (nr_node_ids == 1)
++ pflags &= ~SD_SERIALIZE;
++ }
++ if (~cflags & pflags)
++ return 0;
++
++ return 1;
++}
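The "~cflags & pflags" test above reads as: the parent carries at least one flag the child does not, i.e. the parent's flags are not a subset of the child's, so the parent still adds something. A stand-alone sketch with made-up flag values (not the real SD_* bits):

#include <stdio.h>

#define F_LOAD_BALANCE	0x1UL
#define F_BALANCE_FORK	0x2UL
#define F_SERIALIZE	0x4UL

int main(void)
{
	unsigned long cflags = F_LOAD_BALANCE | F_BALANCE_FORK;	/* child */
	unsigned long pflags = F_LOAD_BALANCE | F_SERIALIZE;	/* parent */

	if (~cflags & pflags)
		printf("parent has flags the child lacks: 0x%lx -> keep the parent\n",
		       ~cflags & pflags);
	else
		printf("parent flags are a subset of the child's -> parent is redundant\n");
	return 0;
}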
++
++static void free_rootdomain(struct rcu_head *rcu)
++{
++ struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
++
++ cpupri_cleanup(&rd->cpupri);
++ cpudl_cleanup(&rd->cpudl);
++ free_cpumask_var(rd->dlo_mask);
++ free_cpumask_var(rd->rto_mask);
++ free_cpumask_var(rd->online);
++ free_cpumask_var(rd->span);
++ kfree(rd);
++}
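free_rootdomain() relies on container_of() to step from the embedded rcu_head back to the enclosing root_domain. A self-contained user-space sketch of that idiom, using hypothetical struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct callback_head {
	void (*func)(struct callback_head *head);
};

struct my_domain {
	int id;
	struct callback_head rcu;	/* embedded, like root_domain::rcu */
};

static void my_free(struct callback_head *head)
{
	/* recover the enclosing object from the pointer to its member */
	struct my_domain *d = container_of(head, struct my_domain, rcu);

	printf("freeing domain %d\n", d->id);
}

int main(void)
{
	struct my_domain d = { .id = 42 };

	my_free(&d.rcu);		/* the callback only ever sees &d.rcu */
	return 0;
}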
++
++static void rq_attach_root(struct rq *rq, struct root_domain *rd)
++{
++ struct root_domain *old_rd = NULL;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&rq->lock, flags);
++
++ if (rq->rd) {
++ old_rd = rq->rd;
++
++ if (cpumask_test_cpu(rq->cpu, old_rd->online))
++ set_rq_offline(rq);
++
++ cpumask_clear_cpu(rq->cpu, old_rd->span);
++
++ /*
++		 * If we don't want to free the old_rd yet then
++ * set old_rd to NULL to skip the freeing later
++ * in this function:
++ */
++ if (!atomic_dec_and_test(&old_rd->refcount))
++ old_rd = NULL;
++ }
++
++ atomic_inc(&rd->refcount);
++ rq->rd = rd;
++
++ cpumask_set_cpu(rq->cpu, rd->span);
++ if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
++ set_rq_online(rq);
++
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++ if (old_rd)
++ call_rcu_sched(&old_rd->rcu, free_rootdomain);
++}
++
++static int init_rootdomain(struct root_domain *rd)
++{
++ memset(rd, 0, sizeof(*rd));
++
++ if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
++ goto out;
++ if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
++ goto free_span;
++ if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
++ goto free_online;
++ if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
++ goto free_dlo_mask;
++
++ init_dl_bw(&rd->dl_bw);
++ if (cpudl_init(&rd->cpudl) != 0)
++ goto free_dlo_mask;
++
++ if (cpupri_init(&rd->cpupri) != 0)
++ goto free_rto_mask;
++ return 0;
++
++free_rto_mask:
++ free_cpumask_var(rd->rto_mask);
++free_dlo_mask:
++ free_cpumask_var(rd->dlo_mask);
++free_online:
++ free_cpumask_var(rd->online);
++free_span:
++ free_cpumask_var(rd->span);
++out:
++ return -ENOMEM;
++}
++
++/*
++ * By default the system creates a single root-domain with all cpus as
++ * members (mimicking the global state we have today).
++ */
++struct root_domain def_root_domain;
++
++static void init_defrootdomain(void)
++{
++ init_rootdomain(&def_root_domain);
++
++ atomic_set(&def_root_domain.refcount, 1);
++}
++
++static struct root_domain *alloc_rootdomain(void)
++{
++ struct root_domain *rd;
++
++ rd = kmalloc(sizeof(*rd), GFP_KERNEL);
++ if (!rd)
++ return NULL;
++
++ if (init_rootdomain(rd) != 0) {
++ kfree(rd);
++ return NULL;
++ }
++
++ return rd;
++}
++
++static void free_sched_groups(struct sched_group *sg, int free_sgc)
++{
++ struct sched_group *tmp, *first;
++
++ if (!sg)
++ return;
++
++ first = sg;
++ do {
++ tmp = sg->next;
++
++ if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
++ kfree(sg->sgc);
++
++ kfree(sg);
++ sg = tmp;
++ } while (sg != first);
++}
++
++static void free_sched_domain(struct rcu_head *rcu)
++{
++ struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
++
++ /*
++	 * If it's an overlapping domain it has private groups, iterate and
++ * nuke them all.
++ */
++ if (sd->flags & SD_OVERLAP) {
++ free_sched_groups(sd->groups, 1);
++ } else if (atomic_dec_and_test(&sd->groups->ref)) {
++ kfree(sd->groups->sgc);
++ kfree(sd->groups);
++ }
++ kfree(sd);
++}
++
++static void destroy_sched_domain(struct sched_domain *sd, int cpu)
++{
++ call_rcu(&sd->rcu, free_sched_domain);
++}
++
++static void destroy_sched_domains(struct sched_domain *sd, int cpu)
++{
++ for (; sd; sd = sd->parent)
++ destroy_sched_domain(sd, cpu);
++}
++
++/*
++ * Keep a special pointer to the highest sched_domain that has
++ * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
++ * allows us to avoid some pointer chasing in select_idle_sibling().
++ *
++ * Also keep a unique ID per domain (we use the first cpu number in
++ * the cpumask of the domain), this allows us to quickly tell if
++ * two cpus are in the same cache domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(struct sched_domain *, sd_llc);
++DEFINE_PER_CPU(int, sd_llc_size);
++DEFINE_PER_CPU(int, sd_llc_id);
++DEFINE_PER_CPU(struct sched_domain *, sd_numa);
++DEFINE_PER_CPU(struct sched_domain *, sd_busy);
++DEFINE_PER_CPU(struct sched_domain *, sd_asym);
++
++static void update_top_cache_domain(int cpu)
++{
++ struct sched_domain *sd;
++ struct sched_domain *busy_sd = NULL;
++ int id = cpu;
++ int size = 1;
++
++ sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
++ if (sd) {
++ id = cpumask_first(sched_domain_span(sd));
++ size = cpumask_weight(sched_domain_span(sd));
++ busy_sd = sd->parent; /* sd_busy */
++ }
++ rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
++
++ rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
++ per_cpu(sd_llc_size, cpu) = size;
++ per_cpu(sd_llc_id, cpu) = id;
++
++ sd = lowest_flag_domain(cpu, SD_NUMA);
++ rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
++
++ sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
++ rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
++}
++
++/*
++ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
++ * hold the hotplug lock.
++ */
++static void
++cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
++{
++ struct rq *rq = cpu_rq(cpu);
++ struct sched_domain *tmp;
++
++ /* Remove the sched domains which do not contribute to scheduling. */
++ for (tmp = sd; tmp; ) {
++ struct sched_domain *parent = tmp->parent;
++ if (!parent)
++ break;
++
++ if (sd_parent_degenerate(tmp, parent)) {
++ tmp->parent = parent->parent;
++ if (parent->parent)
++ parent->parent->child = tmp;
++ /*
++ * Transfer SD_PREFER_SIBLING down in case of a
++ * degenerate parent; the spans match for this
++ * so the property transfers.
++ */
++ if (parent->flags & SD_PREFER_SIBLING)
++ tmp->flags |= SD_PREFER_SIBLING;
++ destroy_sched_domain(parent, cpu);
++ } else
++ tmp = tmp->parent;
++ }
++
++ if (sd && sd_degenerate(sd)) {
++ tmp = sd;
++ sd = sd->parent;
++ destroy_sched_domain(tmp, cpu);
++ if (sd)
++ sd->child = NULL;
++ }
++
++ sched_domain_debug(sd, cpu);
++
++ rq_attach_root(rq, rd);
++ tmp = rq->sd;
++ rcu_assign_pointer(rq->sd, sd);
++ destroy_sched_domains(tmp, cpu);
++
++ update_top_cache_domain(cpu);
++}
++
++/* Setup the mask of cpus configured for isolated domains */
++static int __init isolated_cpu_setup(char *str)
++{
++ alloc_bootmem_cpumask_var(&cpu_isolated_map);
++ cpulist_parse(str, cpu_isolated_map);
++ return 1;
++}
++
++__setup("isolcpus=", isolated_cpu_setup);
++
++struct s_data {
++ struct sched_domain ** __percpu sd;
++ struct root_domain *rd;
++};
++
++enum s_alloc {
++ sa_rootdomain,
++ sa_sd,
++ sa_sd_storage,
++ sa_none,
++};
++
++/*
++ * Build an iteration mask that can exclude certain CPUs from the upwards
++ * domain traversal.
++ *
++ * Asymmetric node setups can result in situations where the domain tree is of
++ * unequal depth, make sure to skip domains that already cover the entire
++ * range.
++ *
++ * In that case build_sched_domains() will have terminated the iteration early
++ * and our sibling sd spans will be empty. Domains should always include the
++ * cpu they're built on, so check that.
++ *
++ */
++static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
++{
++ const struct cpumask *span = sched_domain_span(sd);
++ struct sd_data *sdd = sd->private;
++ struct sched_domain *sibling;
++ int i;
++
++ for_each_cpu(i, span) {
++ sibling = *per_cpu_ptr(sdd->sd, i);
++ if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
++ continue;
++
++ cpumask_set_cpu(i, sched_group_mask(sg));
++ }
++}
++
++/*
++ * Return the canonical balance cpu for this group, this is the first cpu
++ * of this group that's also in the iteration mask.
++ */
++int group_balance_cpu(struct sched_group *sg)
++{
++ return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
++}
++
++static int
++build_overlap_sched_groups(struct sched_domain *sd, int cpu)
++{
++ struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
++ const struct cpumask *span = sched_domain_span(sd);
++ struct cpumask *covered = sched_domains_tmpmask;
++ struct sd_data *sdd = sd->private;
++ struct sched_domain *sibling;
++ int i;
++
++ cpumask_clear(covered);
++
++ for_each_cpu(i, span) {
++ struct cpumask *sg_span;
++
++ if (cpumask_test_cpu(i, covered))
++ continue;
++
++ sibling = *per_cpu_ptr(sdd->sd, i);
++
++ /* See the comment near build_group_mask(). */
++ if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
++ continue;
++
++ sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
++ GFP_KERNEL, cpu_to_node(cpu));
++
++ if (!sg)
++ goto fail;
++
++ sg_span = sched_group_cpus(sg);
++ if (sibling->child)
++ cpumask_copy(sg_span, sched_domain_span(sibling->child));
++ else
++ cpumask_set_cpu(i, sg_span);
++
++ cpumask_or(covered, covered, sg_span);
++
++ sg->sgc = *per_cpu_ptr(sdd->sgc, i);
++ if (atomic_inc_return(&sg->sgc->ref) == 1)
++ build_group_mask(sd, sg);
++
++ /*
++ * Initialize sgc->capacity such that even if we mess up the
++ * domains and no possible iteration will get us here, we won't
++ * die on a /0 trap.
++ */
++ sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
++
++ /*
++ * Make sure the first group of this domain contains the
++ * canonical balance cpu. Otherwise the sched_domain iteration
++ * breaks. See update_sg_lb_stats().
++ */
++ if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
++ group_balance_cpu(sg) == cpu)
++ groups = sg;
++
++ if (!first)
++ first = sg;
++ if (last)
++ last->next = sg;
++ last = sg;
++ last->next = first;
++ }
++ sd->groups = groups;
++
++ return 0;
++
++fail:
++ free_sched_groups(first, 0);
++
++ return -ENOMEM;
++}
++
++static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
++{
++ struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
++ struct sched_domain *child = sd->child;
++
++ if (child)
++ cpu = cpumask_first(sched_domain_span(child));
++
++ if (sg) {
++ *sg = *per_cpu_ptr(sdd->sg, cpu);
++ (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
++ atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
++ }
++
++ return cpu;
++}
++
++/*
++ * build_sched_groups will build a circular linked list of the groups
++ * covered by the given span, and will set each group's ->cpumask correctly,
++ * and ->cpu_capacity to 0.
++ *
++ * Assumes the sched_domain tree is fully constructed
++ */
++static int
++build_sched_groups(struct sched_domain *sd, int cpu)
++{
++ struct sched_group *first = NULL, *last = NULL;
++ struct sd_data *sdd = sd->private;
++ const struct cpumask *span = sched_domain_span(sd);
++ struct cpumask *covered;
++ int i;
++
++ get_group(cpu, sdd, &sd->groups);
++ atomic_inc(&sd->groups->ref);
++
++ if (cpu != cpumask_first(span))
++ return 0;
++
++ lockdep_assert_held(&sched_domains_mutex);
++ covered = sched_domains_tmpmask;
++
++ cpumask_clear(covered);
++
++ for_each_cpu(i, span) {
++ struct sched_group *sg;
++ int group, j;
++
++ if (cpumask_test_cpu(i, covered))
++ continue;
++
++ group = get_group(i, sdd, &sg);
++ cpumask_setall(sched_group_mask(sg));
++
++ for_each_cpu(j, span) {
++ if (get_group(j, sdd, NULL) != group)
++ continue;
++
++ cpumask_set_cpu(j, covered);
++ cpumask_set_cpu(j, sched_group_cpus(sg));
++ }
++
++ if (!first)
++ first = sg;
++ if (last)
++ last->next = sg;
++ last = sg;
++ }
++ last->next = first;
++
++ return 0;
++}
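Both group builders above use the same first/last idiom to construct a circular, singly linked list of groups. A minimal user-space sketch of that idiom (the group struct is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct group {
	int cpu;
	struct group *next;
};

int main(void)
{
	struct group *first = NULL, *last = NULL, *g;
	int cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		g = calloc(1, sizeof(*g));
		if (!g)
			return 1;
		g->cpu = cpu;

		if (!first)
			first = g;
		if (last)
			last->next = g;
		last = g;
	}
	last->next = first;		/* close the ring */

	g = first;			/* walk it exactly once, as the group loops do */
	do {
		printf("group for cpu %d\n", g->cpu);
		g = g->next;
	} while (g != first);
	return 0;
}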
++
++/*
++ * Initialize sched groups cpu_capacity.
++ *
++ * cpu_capacity indicates the capacity of a sched group, which is used while
++ * distributing the load between different sched groups in a sched domain.
++ * Typically cpu_capacity for all the groups in a sched domain will be the same
++ * unless there are asymmetries in the topology. If there are asymmetries,
++ * a group with more cpu_capacity will pick up more load than a
++ * group with less cpu_capacity.
++ */
++static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
++{
++ struct sched_group *sg = sd->groups;
++
++ WARN_ON(!sg);
++
++ do {
++ sg->group_weight = cpumask_weight(sched_group_cpus(sg));
++ sg = sg->next;
++ } while (sg != sd->groups);
++
++ if (cpu != group_balance_cpu(sg))
++ return;
++
++ update_group_capacity(sd, cpu);
++ atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
++}
++
++/*
++ * Initializers for schedule domains
++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
++ */
++
++static int default_relax_domain_level = -1;
++int sched_domain_level_max;
++
++static int __init setup_relax_domain_level(char *str)
++{
++ if (kstrtoint(str, 0, &default_relax_domain_level))
++ pr_warn("Unable to set relax_domain_level\n");
++
++ return 1;
++}
++__setup("relax_domain_level=", setup_relax_domain_level);
++
++static void set_domain_attribute(struct sched_domain *sd,
++ struct sched_domain_attr *attr)
++{
++ int request;
++
++ if (!attr || attr->relax_domain_level < 0) {
++ if (default_relax_domain_level < 0)
++ return;
++ else
++ request = default_relax_domain_level;
++ } else
++ request = attr->relax_domain_level;
++ if (request < sd->level) {
++ /* turn off idle balance on this domain */
++ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
++ } else {
++ /* turn on idle balance on this domain */
++ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
++ }
++}
++
++static void __sdt_free(const struct cpumask *cpu_map);
++static int __sdt_alloc(const struct cpumask *cpu_map);
++
++static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
++ const struct cpumask *cpu_map)
++{
++ switch (what) {
++ case sa_rootdomain:
++ if (!atomic_read(&d->rd->refcount))
++ free_rootdomain(&d->rd->rcu); /* fall through */
++ case sa_sd:
++ free_percpu(d->sd); /* fall through */
++ case sa_sd_storage:
++ __sdt_free(cpu_map); /* fall through */
++ case sa_none:
++ break;
++ }
++}
++
++static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
++ const struct cpumask *cpu_map)
++{
++ memset(d, 0, sizeof(*d));
++
++ if (__sdt_alloc(cpu_map))
++ return sa_sd_storage;
++ d->sd = alloc_percpu(struct sched_domain *);
++ if (!d->sd)
++ return sa_sd_storage;
++ d->rd = alloc_rootdomain();
++ if (!d->rd)
++ return sa_sd;
++ return sa_rootdomain;
++}
++
++/*
++ * NULL the sd_data elements we've used to build the sched_domain and
++ * sched_group structure so that the subsequent __free_domain_allocs()
++ * will not free the data we're using.
++ */
++static void claim_allocations(int cpu, struct sched_domain *sd)
++{
++ struct sd_data *sdd = sd->private;
++
++ WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
++ *per_cpu_ptr(sdd->sd, cpu) = NULL;
++
++ if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
++ *per_cpu_ptr(sdd->sg, cpu) = NULL;
++
++ if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
++ *per_cpu_ptr(sdd->sgc, cpu) = NULL;
++}
++
++#ifdef CONFIG_NUMA
++static int sched_domains_numa_levels;
++enum numa_topology_type sched_numa_topology_type;
++static int *sched_domains_numa_distance;
++int sched_max_numa_distance;
++static struct cpumask ***sched_domains_numa_masks;
++static int sched_domains_curr_level;
++#endif
++
++/*
++ * SD_flags allowed in topology descriptions.
++ *
++ * SD_SHARE_CPUCAPACITY - describes SMT topologies
++ * SD_SHARE_PKG_RESOURCES - describes shared caches
++ * SD_NUMA - describes NUMA topologies
++ * SD_SHARE_POWERDOMAIN - describes shared power domain
++ *
++ * Odd one out:
++ * SD_ASYM_PACKING - describes SMT quirks
++ */
++#define TOPOLOGY_SD_FLAGS \
++ (SD_SHARE_CPUCAPACITY | \
++ SD_SHARE_PKG_RESOURCES | \
++ SD_NUMA | \
++ SD_ASYM_PACKING | \
++ SD_SHARE_POWERDOMAIN)
++
++static struct sched_domain *
++sd_init(struct sched_domain_topology_level *tl, int cpu)
++{
++ struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
++ int sd_weight, sd_flags = 0;
++
++#ifdef CONFIG_NUMA
++ /*
++ * Ugly hack to pass state to sd_numa_mask()...
++ */
++ sched_domains_curr_level = tl->numa_level;
++#endif
++
++ sd_weight = cpumask_weight(tl->mask(cpu));
++
++ if (tl->sd_flags)
++ sd_flags = (*tl->sd_flags)();
++ if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
++ "wrong sd_flags in topology description\n"))
++ sd_flags &= ~TOPOLOGY_SD_FLAGS;
++
++ *sd = (struct sched_domain){
++ .min_interval = sd_weight,
++ .max_interval = 2*sd_weight,
++ .busy_factor = 32,
++ .imbalance_pct = 125,
++
++ .cache_nice_tries = 0,
++ .busy_idx = 0,
++ .idle_idx = 0,
++ .newidle_idx = 0,
++ .wake_idx = 0,
++ .forkexec_idx = 0,
++
++ .flags = 1*SD_LOAD_BALANCE
++ | 1*SD_BALANCE_NEWIDLE
++ | 1*SD_BALANCE_EXEC
++ | 1*SD_BALANCE_FORK
++ | 0*SD_BALANCE_WAKE
++ | 1*SD_WAKE_AFFINE
++ | 0*SD_SHARE_CPUCAPACITY
++ | 0*SD_SHARE_PKG_RESOURCES
++ | 0*SD_SERIALIZE
++ | 0*SD_PREFER_SIBLING
++ | 0*SD_NUMA
++ | sd_flags
++ ,
++
++ .last_balance = jiffies,
++ .balance_interval = sd_weight,
++ .smt_gain = 0,
++ .max_newidle_lb_cost = 0,
++ .next_decay_max_lb_cost = jiffies,
++#ifdef CONFIG_SCHED_DEBUG
++ .name = tl->name,
++#endif
++ };
++
++ /*
++ * Convert topological properties into behaviour.
++ */
++
++ if (sd->flags & SD_SHARE_CPUCAPACITY) {
++ sd->flags |= SD_PREFER_SIBLING;
++ sd->imbalance_pct = 110;
++ sd->smt_gain = 1178; /* ~15% */
++
++ } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
++ sd->imbalance_pct = 117;
++ sd->cache_nice_tries = 1;
++ sd->busy_idx = 2;
++
++#ifdef CONFIG_NUMA
++ } else if (sd->flags & SD_NUMA) {
++ sd->cache_nice_tries = 2;
++ sd->busy_idx = 3;
++ sd->idle_idx = 2;
++
++ sd->flags |= SD_SERIALIZE;
++ if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
++ sd->flags &= ~(SD_BALANCE_EXEC |
++ SD_BALANCE_FORK |
++ SD_WAKE_AFFINE);
++ }
++
++#endif
++ } else {
++ sd->flags |= SD_PREFER_SIBLING;
++ sd->cache_nice_tries = 1;
++ sd->busy_idx = 2;
++ sd->idle_idx = 1;
++ }
++
++ sd->private = &tl->data;
++
++ return sd;
++}
++
++/*
++ * Topology list, bottom-up.
++ */
++static struct sched_domain_topology_level default_topology[] = {
++#ifdef CONFIG_SCHED_SMT
++ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
++#endif
++#ifdef CONFIG_SCHED_MC
++ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
++#endif
++ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
++ { NULL, },
++};
++
++struct sched_domain_topology_level *sched_domain_topology = default_topology;
++
++#define for_each_sd_topology(tl) \
++ for (tl = sched_domain_topology; tl->mask; tl++)
++
++void set_sched_topology(struct sched_domain_topology_level *tl)
++{
++ sched_domain_topology = tl;
++}
++
++#ifdef CONFIG_NUMA
++
++static const struct cpumask *sd_numa_mask(int cpu)
++{
++ return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
++}
++
++static void sched_numa_warn(const char *str)
++{
++ static int done = false;
++ int i,j;
++
++ if (done)
++ return;
++
++ done = true;
++
++ printk(KERN_WARNING "ERROR: %s\n\n", str);
++
++ for (i = 0; i < nr_node_ids; i++) {
++ printk(KERN_WARNING " ");
++ for (j = 0; j < nr_node_ids; j++)
++ printk(KERN_CONT "%02d ", node_distance(i,j));
++ printk(KERN_CONT "\n");
++ }
++ printk(KERN_WARNING "\n");
++}
++
++bool find_numa_distance(int distance)
++{
++ int i;
++
++ if (distance == node_distance(0, 0))
++ return true;
++
++ for (i = 0; i < sched_domains_numa_levels; i++) {
++ if (sched_domains_numa_distance[i] == distance)
++ return true;
++ }
++
++ return false;
++}
++
++/*
++ * A system can have three types of NUMA topology:
++ * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
++ * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
++ * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
++ *
++ * The difference between a glueless mesh topology and a backplane
++ * topology lies in whether communication between not directly
++ * connected nodes goes through intermediary nodes (where programs
++ * could run), or through backplane controllers. This affects
++ * placement of programs.
++ *
++ * The type of topology can be discerned with the following tests:
++ * - If the maximum distance between any nodes is 1 hop, the system
++ * is directly connected.
++ * - If for two nodes A and B, located N > 1 hops away from each other,
++ * there is an intermediary node C, which is < N hops away from both
++ * nodes A and B, the system is a glueless mesh.
++ */
++static void init_numa_topology_type(void)
++{
++ int a, b, c, n;
++
++ n = sched_max_numa_distance;
++
++ if (n <= 1)
++ sched_numa_topology_type = NUMA_DIRECT;
++
++ for_each_online_node(a) {
++ for_each_online_node(b) {
++ /* Find two nodes furthest removed from each other. */
++ if (node_distance(a, b) < n)
++ continue;
++
++ /* Is there an intermediary node between a and b? */
++ for_each_online_node(c) {
++ if (node_distance(a, c) < n &&
++ node_distance(b, c) < n) {
++ sched_numa_topology_type =
++ NUMA_GLUELESS_MESH;
++ return;
++ }
++ }
++
++ sched_numa_topology_type = NUMA_BACKPLANE;
++ return;
++ }
++ }
++}
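A user-space sketch of the classification test described in the comment above, run over a hypothetical 4-node distance table (10 = local, larger = more hops). It reports a glueless mesh because the two furthest nodes are bridged by a closer intermediary node:

#include <stdio.h>

#define NR_NODES 4

static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

int main(void)
{
	int a, b, c, n = 0;

	for (a = 0; a < NR_NODES; a++)
		for (b = 0; b < NR_NODES; b++)
			if (dist[a][b] > n)
				n = dist[a][b];	/* maximum distance */

	for (a = 0; a < NR_NODES; a++) {
		for (b = 0; b < NR_NODES; b++) {
			if (dist[a][b] < n)
				continue;	/* not a furthest pair */
			for (c = 0; c < NR_NODES; c++) {
				/* intermediary closer than n to both ends? */
				if (dist[a][c] < n && dist[b][c] < n) {
					puts("NUMA_GLUELESS_MESH");
					return 0;
				}
			}
			puts("NUMA_BACKPLANE");
			return 0;
		}
	}
	puts("NUMA_DIRECT");
	return 0;
}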
++
++static void sched_init_numa(void)
++{
++ int next_distance, curr_distance = node_distance(0, 0);
++ struct sched_domain_topology_level *tl;
++ int level = 0;
++ int i, j, k;
++
++ sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
++ if (!sched_domains_numa_distance)
++ return;
++
++ /*
++ * O(nr_nodes^2) deduplicating selection sort -- in order to find the
++ * unique distances in the node_distance() table.
++ *
++ * Assumes node_distance(0,j) includes all distances in
++ * node_distance(i,j) in order to avoid cubic time.
++ */
++ next_distance = curr_distance;
++ for (i = 0; i < nr_node_ids; i++) {
++ for (j = 0; j < nr_node_ids; j++) {
++ for (k = 0; k < nr_node_ids; k++) {
++ int distance = node_distance(i, k);
++
++ if (distance > curr_distance &&
++ (distance < next_distance ||
++ next_distance == curr_distance))
++ next_distance = distance;
++
++ /*
++ * While not a strong assumption it would be nice to know
++ * about cases where if node A is connected to B, B is not
++ * equally connected to A.
++ */
++ if (sched_debug() && node_distance(k, i) != distance)
++ sched_numa_warn("Node-distance not symmetric");
++
++ if (sched_debug() && i && !find_numa_distance(distance))
++ sched_numa_warn("Node-0 not representative");
++ }
++ if (next_distance != curr_distance) {
++ sched_domains_numa_distance[level++] = next_distance;
++ sched_domains_numa_levels = level;
++ curr_distance = next_distance;
++ } else break;
++ }
++
++ /*
++ * In case of sched_debug() we verify the above assumption.
++ */
++ if (!sched_debug())
++ break;
++ }
++
++ if (!level)
++ return;
++
++ /*
++ * 'level' contains the number of unique distances, excluding the
++ * identity distance node_distance(i,i).
++ *
++ * The sched_domains_numa_distance[] array includes the actual distance
++ * numbers.
++ */
++
++ /*
++ * Here, we should temporarily reset sched_domains_numa_levels to 0.
++ * If it fails to allocate memory for array sched_domains_numa_masks[][],
++	 * the array will contain fewer than 'level' members. This could be
++ * dangerous when we use it to iterate array sched_domains_numa_masks[][]
++ * in other functions.
++ *
++ * We reset it to 'level' at the end of this function.
++ */
++ sched_domains_numa_levels = 0;
++
++ sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
++ if (!sched_domains_numa_masks)
++ return;
++
++ /*
++ * Now for each level, construct a mask per node which contains all
++ * cpus of nodes that are that many hops away from us.
++ */
++ for (i = 0; i < level; i++) {
++ sched_domains_numa_masks[i] =
++ kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
++ if (!sched_domains_numa_masks[i])
++ return;
++
++ for (j = 0; j < nr_node_ids; j++) {
++ struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
++ if (!mask)
++ return;
++
++ sched_domains_numa_masks[i][j] = mask;
++
++ for (k = 0; k < nr_node_ids; k++) {
++ if (node_distance(j, k) > sched_domains_numa_distance[i])
++ continue;
++
++ cpumask_or(mask, mask, cpumask_of_node(k));
++ }
++ }
++ }
++
++ /* Compute default topology size */
++ for (i = 0; sched_domain_topology[i].mask; i++);
++
++ tl = kzalloc((i + level + 1) *
++ sizeof(struct sched_domain_topology_level), GFP_KERNEL);
++ if (!tl)
++ return;
++
++ /*
++ * Copy the default topology bits..
++ */
++ for (i = 0; sched_domain_topology[i].mask; i++)
++ tl[i] = sched_domain_topology[i];
++
++ /*
++ * .. and append 'j' levels of NUMA goodness.
++ */
++ for (j = 0; j < level; i++, j++) {
++ tl[i] = (struct sched_domain_topology_level){
++ .mask = sd_numa_mask,
++ .sd_flags = cpu_numa_flags,
++ .flags = SDTL_OVERLAP,
++ .numa_level = j,
++ SD_INIT_NAME(NUMA)
++ };
++ }
++
++ sched_domain_topology = tl;
++
++ sched_domains_numa_levels = level;
++ sched_max_numa_distance = sched_domains_numa_distance[level - 1];
++
++ init_numa_topology_type();
++}
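The deduplicating selection sort at the top of sched_init_numa() can be illustrated in isolation: repeatedly pick the smallest distance strictly greater than the previous one until none is left. A sketch over a made-up 3-node table (prints 10 20 30):

#include <stdio.h>

#define NR_NODES 3

static const int dist[NR_NODES][NR_NODES] = {
	{ 10, 20, 30 },
	{ 20, 10, 20 },
	{ 30, 20, 10 },
};

int main(void)
{
	int curr = dist[0][0];		/* local distance */
	int levels = 0;

	printf("%d", curr);
	for (;;) {
		int next = curr;
		int i, j;

		for (i = 0; i < NR_NODES; i++)
			for (j = 0; j < NR_NODES; j++)
				if (dist[i][j] > curr &&
				    (dist[i][j] < next || next == curr))
					next = dist[i][j];

		if (next == curr)
			break;		/* no larger distance left */
		printf(" %d", next);
		curr = next;
		levels++;
	}
	printf("\n%d unique non-local distances\n", levels);
	return 0;
}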
++
++static void sched_domains_numa_masks_set(int cpu)
++{
++ int i, j;
++ int node = cpu_to_node(cpu);
++
++ for (i = 0; i < sched_domains_numa_levels; i++) {
++ for (j = 0; j < nr_node_ids; j++) {
++ if (node_distance(j, node) <= sched_domains_numa_distance[i])
++ cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
++ }
++ }
++}
++
++static void sched_domains_numa_masks_clear(int cpu)
++{
++ int i, j;
++ for (i = 0; i < sched_domains_numa_levels; i++) {
++ for (j = 0; j < nr_node_ids; j++)
++ cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
++ }
++}
++
++/*
++ * Update sched_domains_numa_masks[level][node] array when new cpus
++ * are onlined.
++ */
++static int sched_domains_numa_masks_update(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ int cpu = (long)hcpu;
++
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_ONLINE:
++ sched_domains_numa_masks_set(cpu);
++ break;
++
++ case CPU_DEAD:
++ sched_domains_numa_masks_clear(cpu);
++ break;
++
++ default:
++ return NOTIFY_DONE;
++ }
++
++ return NOTIFY_OK;
++}
++#else
++static inline void sched_init_numa(void)
++{
++}
++
++static int sched_domains_numa_masks_update(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ return 0;
++}
++#endif /* CONFIG_NUMA */
++
++static int __sdt_alloc(const struct cpumask *cpu_map)
++{
++ struct sched_domain_topology_level *tl;
++ int j;
++
++ for_each_sd_topology(tl) {
++ struct sd_data *sdd = &tl->data;
++
++ sdd->sd = alloc_percpu(struct sched_domain *);
++ if (!sdd->sd)
++ return -ENOMEM;
++
++ sdd->sg = alloc_percpu(struct sched_group *);
++ if (!sdd->sg)
++ return -ENOMEM;
++
++ sdd->sgc = alloc_percpu(struct sched_group_capacity *);
++ if (!sdd->sgc)
++ return -ENOMEM;
++
++ for_each_cpu(j, cpu_map) {
++ struct sched_domain *sd;
++ struct sched_group *sg;
++ struct sched_group_capacity *sgc;
++
++ sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
++ GFP_KERNEL, cpu_to_node(j));
++ if (!sd)
++ return -ENOMEM;
++
++ *per_cpu_ptr(sdd->sd, j) = sd;
++
++ sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
++ GFP_KERNEL, cpu_to_node(j));
++ if (!sg)
++ return -ENOMEM;
++
++ sg->next = sg;
++
++ *per_cpu_ptr(sdd->sg, j) = sg;
++
++ sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
++ GFP_KERNEL, cpu_to_node(j));
++ if (!sgc)
++ return -ENOMEM;
++
++ *per_cpu_ptr(sdd->sgc, j) = sgc;
++ }
++ }
++
++ return 0;
++}
++
++static void __sdt_free(const struct cpumask *cpu_map)
++{
++ struct sched_domain_topology_level *tl;
++ int j;
++
++ for_each_sd_topology(tl) {
++ struct sd_data *sdd = &tl->data;
++
++ for_each_cpu(j, cpu_map) {
++ struct sched_domain *sd;
++
++ if (sdd->sd) {
++ sd = *per_cpu_ptr(sdd->sd, j);
++ if (sd && (sd->flags & SD_OVERLAP))
++ free_sched_groups(sd->groups, 0);
++ kfree(*per_cpu_ptr(sdd->sd, j));
++ }
++
++ if (sdd->sg)
++ kfree(*per_cpu_ptr(sdd->sg, j));
++ if (sdd->sgc)
++ kfree(*per_cpu_ptr(sdd->sgc, j));
++ }
++ free_percpu(sdd->sd);
++ sdd->sd = NULL;
++ free_percpu(sdd->sg);
++ sdd->sg = NULL;
++ free_percpu(sdd->sgc);
++ sdd->sgc = NULL;
++ }
++}
++
++struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
++ const struct cpumask *cpu_map, struct sched_domain_attr *attr,
++ struct sched_domain *child, int cpu)
++{
++ struct sched_domain *sd = sd_init(tl, cpu);
++ if (!sd)
++ return child;
++
++ cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
++ if (child) {
++ sd->level = child->level + 1;
++ sched_domain_level_max = max(sched_domain_level_max, sd->level);
++ child->parent = sd;
++ sd->child = child;
++
++ if (!cpumask_subset(sched_domain_span(child),
++ sched_domain_span(sd))) {
++ pr_err("BUG: arch topology borken\n");
++#ifdef CONFIG_SCHED_DEBUG
++ pr_err(" the %s domain not a subset of the %s domain\n",
++ child->name, sd->name);
++#endif
++ /* Fixup, ensure @sd has at least @child cpus. */
++ cpumask_or(sched_domain_span(sd),
++ sched_domain_span(sd),
++ sched_domain_span(child));
++ }
++
++ }
++ set_domain_attribute(sd, attr);
++
++ return sd;
++}
++
++/*
++ * Build sched domains for a given set of cpus and attach the sched domains
++ * to the individual cpus
++ */
++static int build_sched_domains(const struct cpumask *cpu_map,
++ struct sched_domain_attr *attr)
++{
++ enum s_alloc alloc_state;
++ struct sched_domain *sd;
++ struct s_data d;
++ int i, ret = -ENOMEM;
++
++ alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
++ if (alloc_state != sa_rootdomain)
++ goto error;
++
++ /* Set up domains for cpus specified by the cpu_map. */
++ for_each_cpu(i, cpu_map) {
++ struct sched_domain_topology_level *tl;
++
++ sd = NULL;
++ for_each_sd_topology(tl) {
++ sd = build_sched_domain(tl, cpu_map, attr, sd, i);
++ if (tl == sched_domain_topology)
++ *per_cpu_ptr(d.sd, i) = sd;
++ if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
++ sd->flags |= SD_OVERLAP;
++ if (cpumask_equal(cpu_map, sched_domain_span(sd)))
++ break;
++ }
++ }
++
++ /* Build the groups for the domains */
++ for_each_cpu(i, cpu_map) {
++ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
++ sd->span_weight = cpumask_weight(sched_domain_span(sd));
++ if (sd->flags & SD_OVERLAP) {
++ if (build_overlap_sched_groups(sd, i))
++ goto error;
++ } else {
++ if (build_sched_groups(sd, i))
++ goto error;
++ }
++ }
++ }
++
++ /* Calculate CPU capacity for physical packages and nodes */
++ for (i = nr_cpumask_bits-1; i >= 0; i--) {
++ if (!cpumask_test_cpu(i, cpu_map))
++ continue;
++
++ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
++ claim_allocations(i, sd);
++ init_sched_groups_capacity(i, sd);
++ }
++ }
++
++ /* Attach the domains */
++ rcu_read_lock();
++ for_each_cpu(i, cpu_map) {
++ sd = *per_cpu_ptr(d.sd, i);
++ cpu_attach_domain(sd, d.rd, i);
++ }
++ rcu_read_unlock();
++
++ ret = 0;
++error:
++ __free_domain_allocs(&d, alloc_state, cpu_map);
++ return ret;
++}
++
++static cpumask_var_t *doms_cur; /* current sched domains */
++static int ndoms_cur; /* number of sched domains in 'doms_cur' */
++static struct sched_domain_attr *dattr_cur;
++				/* attributes of custom domains in 'doms_cur' */
++
++/*
++ * Special case: If a kmalloc of a doms_cur partition (array of
++ * cpumask) fails, then fallback to a single sched domain,
++ * as determined by the single cpumask fallback_doms.
++ */
++static cpumask_var_t fallback_doms;
++
++/*
++ * arch_update_cpu_topology lets virtualized architectures update the
++ * cpu core maps. It is supposed to return 1 if the topology changed
++ * or 0 if it stayed the same.
++ */
++int __weak arch_update_cpu_topology(void)
++{
++ return 0;
++}
++
++cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
++{
++ int i;
++ cpumask_var_t *doms;
++
++ doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
++ if (!doms)
++ return NULL;
++ for (i = 0; i < ndoms; i++) {
++ if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
++ free_sched_domains(doms, i);
++ return NULL;
++ }
++ }
++ return doms;
++}
++
++void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
++{
++ unsigned int i;
++ for (i = 0; i < ndoms; i++)
++ free_cpumask_var(doms[i]);
++ kfree(doms);
++}
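alloc_sched_domains() and free_sched_domains() above show the usual partial-failure pattern: if the i-th allocation fails, free exactly the i elements already allocated before giving up. A user-space sketch of the pattern with plain malloc/calloc and hypothetical names:

#include <stdlib.h>

static void free_masks(unsigned long **doms, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		free(doms[i]);
	free(doms);
}

static unsigned long **alloc_masks(unsigned int ndoms, size_t mask_sz)
{
	unsigned long **doms = malloc(sizeof(*doms) * ndoms);
	unsigned int i;

	if (!doms)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		doms[i] = calloc(1, mask_sz);
		if (!doms[i]) {
			free_masks(doms, i);	/* free only what was allocated */
			return NULL;
		}
	}
	return doms;
}

int main(void)
{
	unsigned long **doms = alloc_masks(4, 128);

	if (doms)
		free_masks(doms, 4);
	return 0;
}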
++
++/*
++ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
++ * For now this just excludes isolated cpus, but could be used to
++ * exclude other special cases in the future.
++ */
++static int init_sched_domains(const struct cpumask *cpu_map)
++{
++ int err;
++
++ arch_update_cpu_topology();
++ ndoms_cur = 1;
++ doms_cur = alloc_sched_domains(ndoms_cur);
++ if (!doms_cur)
++ doms_cur = &fallback_doms;
++ cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
++ err = build_sched_domains(doms_cur[0], NULL);
++ register_sched_domain_sysctl();
++
++ return err;
++}
++
++/*
++ * Detach sched domains from a group of cpus specified in cpu_map
++ * These cpus will now be attached to the NULL domain
++ */
++static void detach_destroy_domains(const struct cpumask *cpu_map)
++{
++ int i;
++
++ rcu_read_lock();
++ for_each_cpu(i, cpu_map)
++ cpu_attach_domain(NULL, &def_root_domain, i);
++ rcu_read_unlock();
++}
++
++/* handle null as "default" */
++static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
++ struct sched_domain_attr *new, int idx_new)
++{
++ struct sched_domain_attr tmp;
++
++ /* fast path */
++ if (!new && !cur)
++ return 1;
++
++ tmp = SD_ATTR_INIT;
++ return !memcmp(cur ? (cur + idx_cur) : &tmp,
++ new ? (new + idx_new) : &tmp,
++ sizeof(struct sched_domain_attr));
++}
++
++/*
++ * Partition sched domains as specified by the 'ndoms_new'
++ * cpumasks in the array doms_new[] of cpumasks. This compares
++ * doms_new[] to the current sched domain partitioning, doms_cur[].
++ * It destroys each deleted domain and builds each new domain.
++ *
++ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
++ * The masks don't intersect (don't overlap). We should set up one
++ * sched domain for each mask. CPUs not in any of the cpumasks will
++ * not be load balanced. If the same cpumask appears both in the
++ * current 'doms_cur' domains and in the new 'doms_new', we can leave
++ * it as it is.
++ *
++ * The passed in 'doms_new' should be allocated using
++ * alloc_sched_domains. This routine takes ownership of it and will
++ * free_sched_domains it when done with it. If the caller failed the
++ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
++ * and partition_sched_domains() will fall back to the single partition
++ * 'fallback_doms'; it also forces the domains to be rebuilt.
++ *
++ * If doms_new == NULL it will be replaced with cpu_online_mask.
++ * ndoms_new == 0 is a special case for destroying existing domains,
++ * and it will not create the default domain.
++ *
++ * Call with hotplug lock held
++ */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++ struct sched_domain_attr *dattr_new)
++{
++ int i, j, n;
++ int new_topology;
++
++ mutex_lock(&sched_domains_mutex);
++
++ /* always unregister in case we don't destroy any domains */
++ unregister_sched_domain_sysctl();
++
++ /* Let architecture update cpu core mappings. */
++ new_topology = arch_update_cpu_topology();
++
++ n = doms_new ? ndoms_new : 0;
++
++ /* Destroy deleted domains */
++ for (i = 0; i < ndoms_cur; i++) {
++ for (j = 0; j < n && !new_topology; j++) {
++ if (cpumask_equal(doms_cur[i], doms_new[j])
++ && dattrs_equal(dattr_cur, i, dattr_new, j))
++ goto match1;
++ }
++ /* no match - a current sched domain not in new doms_new[] */
++ detach_destroy_domains(doms_cur[i]);
++match1:
++ ;
++ }
++
++ n = ndoms_cur;
++ if (doms_new == NULL) {
++ n = 0;
++ doms_new = &fallback_doms;
++ cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
++ WARN_ON_ONCE(dattr_new);
++ }
++
++ /* Build new domains */
++ for (i = 0; i < ndoms_new; i++) {
++ for (j = 0; j < n && !new_topology; j++) {
++ if (cpumask_equal(doms_new[i], doms_cur[j])
++ && dattrs_equal(dattr_new, i, dattr_cur, j))
++ goto match2;
++ }
++ /* no match - add a new doms_new */
++ build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
++match2:
++ ;
++ }
++
++ /* Remember the new sched domains */
++ if (doms_cur != &fallback_doms)
++ free_sched_domains(doms_cur, ndoms_cur);
++ kfree(dattr_cur); /* kfree(NULL) is safe */
++ doms_cur = doms_new;
++ dattr_cur = dattr_new;
++ ndoms_cur = ndoms_new;
++
++ register_sched_domain_sysctl();
++
++ mutex_unlock(&sched_domains_mutex);
++}
++
++static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
++
++/*
++ * Update cpusets according to cpu_active mask. If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
++ void *hcpu)
++{
++ switch (action) {
++ case CPU_ONLINE_FROZEN:
++ case CPU_DOWN_FAILED_FROZEN:
++
++ /*
++ * num_cpus_frozen tracks how many CPUs are involved in suspend
++ * resume sequence. As long as this is not the last online
++ * operation in the resume sequence, just build a single sched
++ * domain, ignoring cpusets.
++ */
++ num_cpus_frozen--;
++ if (likely(num_cpus_frozen)) {
++ partition_sched_domains(1, NULL, NULL);
++ break;
++ }
++
++ /*
++ * This is the last CPU online operation. So fall through and
++ * restore the original sched domains by considering the
++ * cpuset configurations.
++ */
++
++ case CPU_ONLINE:
++ cpuset_update_active_cpus(true);
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++ return NOTIFY_OK;
++}
++
++static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
++ void *hcpu)
++{
++ unsigned long flags;
++ long cpu = (long)hcpu;
++ struct dl_bw *dl_b;
++ bool overflow;
++ int cpus;
++
++ switch (action) {
++ case CPU_DOWN_PREPARE:
++ rcu_read_lock_sched();
++ dl_b = dl_bw_of(cpu);
++
++ raw_spin_lock_irqsave(&dl_b->lock, flags);
++ cpus = dl_bw_cpus(cpu);
++ overflow = __dl_overflow(dl_b, cpus, 0, 0);
++ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
++
++ rcu_read_unlock_sched();
++
++ if (overflow)
++ return notifier_from_errno(-EBUSY);
++ cpuset_update_active_cpus(false);
++ break;
++ case CPU_DOWN_PREPARE_FROZEN:
++ num_cpus_frozen++;
++ partition_sched_domains(1, NULL, NULL);
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++ return NOTIFY_OK;
++}
++
++void __init sched_init_smp(void)
++{
++ cpumask_var_t non_isolated_cpus;
++
++ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
++ alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
++
++ sched_init_numa();
++
++ /*
++ * There's no userspace yet to cause hotplug operations; hence all the
++ * cpu masks are stable and all blatant races in the below code cannot
++ * happen.
++ */
++ mutex_lock(&sched_domains_mutex);
++ init_sched_domains(cpu_active_mask);
++ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
++ if (cpumask_empty(non_isolated_cpus))
++ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
++ mutex_unlock(&sched_domains_mutex);
++
++ hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
++ hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
++ hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
++
++ init_hrtick();
++
++ /* Move init over to a non-isolated CPU */
++ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
++ BUG();
++ sched_init_granularity();
++ free_cpumask_var(non_isolated_cpus);
++
++ init_sched_rt_class();
++ init_sched_dl_class();
++}
++#else
++void __init sched_init_smp(void)
++{
++ sched_init_granularity();
++}
++#endif /* CONFIG_SMP */
++
++const_debug unsigned int sysctl_timer_migration = 1;
++
++int in_sched_functions(unsigned long addr)
++{
++ return in_lock_functions(addr) ||
++ (addr >= (unsigned long)__sched_text_start
++ && addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/*
++ * Default task group.
++ * Every task in system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++#endif
++
++DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
++
++void __init sched_init(void)
++{
++ int i, j;
++ unsigned long alloc_size = 0, ptr;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
++ alloc_size += 2 * nr_cpu_ids * sizeof(void **);
++#endif
++ if (alloc_size) {
++ ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ root_task_group.se = (struct sched_entity **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++ root_task_group.cfs_rq = (struct cfs_rq **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++#ifdef CONFIG_RT_GROUP_SCHED
++ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++ root_task_group.rt_rq = (struct rt_rq **)ptr;
++ ptr += nr_cpu_ids * sizeof(void **);
++
++#endif /* CONFIG_RT_GROUP_SCHED */
++ }
++#ifdef CONFIG_CPUMASK_OFFSTACK
++ for_each_possible_cpu(i) {
++ per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
++ cpumask_size(), GFP_KERNEL, cpu_to_node(i));
++ }
++#endif /* CONFIG_CPUMASK_OFFSTACK */
++
++ init_rt_bandwidth(&def_rt_bandwidth,
++ global_rt_period(), global_rt_runtime());
++ init_dl_bandwidth(&def_dl_bandwidth,
++ global_rt_period(), global_rt_runtime());
++
++#ifdef CONFIG_SMP
++ init_defrootdomain();
++#endif
++
++#ifdef CONFIG_RT_GROUP_SCHED
++ init_rt_bandwidth(&root_task_group.rt_bandwidth,
++ global_rt_period(), global_rt_runtime());
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++#ifdef CONFIG_CGROUP_SCHED
++ list_add(&root_task_group.list, &task_groups);
++ INIT_LIST_HEAD(&root_task_group.children);
++ INIT_LIST_HEAD(&root_task_group.siblings);
++ autogroup_init(&init_task);
++
++#endif /* CONFIG_CGROUP_SCHED */
++
++ for_each_possible_cpu(i) {
++ struct rq *rq;
++
++ rq = cpu_rq(i);
++ raw_spin_lock_init(&rq->lock);
++ rq->nr_running = 0;
++ rq->calc_load_active = 0;
++ rq->calc_load_update = jiffies + LOAD_FREQ;
++ init_cfs_rq(&rq->cfs);
++ init_rt_rq(&rq->rt);
++ init_dl_rq(&rq->dl);
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ root_task_group.shares = ROOT_TASK_GROUP_LOAD;
++ INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
++ /*
++ * How much cpu bandwidth does root_task_group get?
++ *
++ * In case of task-groups formed thr' the cgroup filesystem, it
++ * gets 100% of the cpu resources in the system. This overall
++ * system cpu resource is divided among the tasks of
++ * root_task_group and its child task-groups in a fair manner,
++ * based on each entity's (task or task-group's) weight
++ * (se->load.weight).
++ *
++		 * In other words, if root_task_group has 10 tasks (each of weight
++		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
++ * then A0's share of the cpu resource is:
++ *
++ * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
++ *
++ * We achieve this by letting root_task_group's tasks sit
++ * directly in rq->cfs (i.e root_task_group->se[] = NULL).
++ */
++ init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
++ init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++ rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
++#ifdef CONFIG_RT_GROUP_SCHED
++ init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
++#endif
++
++ for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
++ rq->cpu_load[j] = 0;
++
++ rq->last_load_update_tick = jiffies;
++
++#ifdef CONFIG_SMP
++ rq->sd = NULL;
++ rq->rd = NULL;
++ rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
++ rq->post_schedule = 0;
++ rq->active_balance = 0;
++ rq->next_balance = jiffies;
++ rq->push_cpu = 0;
++ rq->cpu = i;
++ rq->online = 0;
++ rq->idle_stamp = 0;
++ rq->avg_idle = 2*sysctl_sched_migration_cost;
++ rq->max_idle_balance_cost = sysctl_sched_migration_cost;
++
++ INIT_LIST_HEAD(&rq->cfs_tasks);
++
++ rq_attach_root(rq, &def_root_domain);
++#ifdef CONFIG_NO_HZ_COMMON
++ rq->nohz_flags = 0;
++#endif
++#ifdef CONFIG_NO_HZ_FULL
++ rq->last_sched_tick = 0;
++#endif
++#endif
++ init_rq_hrtick(rq);
++ atomic_set(&rq->nr_iowait, 0);
++ }
++
++ set_load_weight(&init_task);
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++ INIT_HLIST_HEAD(&init_task.preempt_notifiers);
++#endif
++
++ /*
++ * The boot idle thread does lazy MMU switching as well:
++ */
++ atomic_inc(&init_mm.mm_count);
++ enter_lazy_tlb(&init_mm, current);
++
++ /*
++ * During early bootup we pretend to be a normal task:
++ */
++ current->sched_class = &fair_sched_class;
++
++ /*
++ * Make us the idle thread. Technically, schedule() should not be
++ * called from this thread, however somewhere below it might be,
++ * but because we are the idle thread, we just pick up running again
++ * when this runqueue becomes "idle".
++ */
++ init_idle(current, smp_processor_id());
++
++ calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++ zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
++ /* May be allocated at isolcpus cmdline parse time */
++ if (cpu_isolated_map == NULL)
++ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
++ idle_thread_set_boot_cpu();
++ set_cpu_rq_start_time();
++#endif
++ init_sched_fair_class();
++
++ scheduler_running = 1;
++}
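
The bandwidth comment inside sched_init() above works the CFS share out by hand: with ten root tasks of weight 1024 plus child groups A0 and A1 of weight 1024 each, A0 receives 1024 / (10*1024 + 1024 + 1024) ≈ 8.33% of the CPU. A tiny sketch of that arithmetic, using only the numbers from the comment (not part of the patch):

/* Sketch of the root_task_group share arithmetic from the comment in
 * sched_init(); entities on one cfs_rq split bandwidth by load weight. */
#include <stdio.h>

int main(void)
{
	unsigned long task_weight  = 1024;	/* 10 root tasks of weight 1024 */
	unsigned long group_weight = 1024;	/* child groups A0 and A1       */
	unsigned long total = 10 * task_weight + 2 * group_weight;

	printf("A0's bandwidth = %.2f%%\n", 100.0 * group_weight / total); /* ~8.33% */
	return 0;
}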
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++static inline int preempt_count_equals(int preempt_offset)
++{
++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
++
++ return (nested == preempt_offset);
++}
++
++void __might_sleep(const char *file, int line, int preempt_offset)
++{
++ /*
++ * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING, make sure we enter with it,
++ * otherwise we will destroy state.
++ */
++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
++ "do not call blocking ops when !TASK_RUNNING; "
++ "state=%lx set at [<%p>] %pS\n",
++ current->state,
++ (void *)current->task_state_change,
++ (void *)current->task_state_change);
++
++ ___might_sleep(file, line, preempt_offset);
++}
++EXPORT_SYMBOL(__might_sleep);
++
++void ___might_sleep(const char *file, int line, int preempt_offset)
++{
++ static unsigned long prev_jiffy; /* ratelimiting */
++
++ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
++ !is_idle_task(current)) ||
++ system_state != SYSTEM_RUNNING || oops_in_progress)
++ return;
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ printk(KERN_ERR
++ "BUG: sleeping function called from invalid context at %s:%d\n",
++ file, line);
++ printk(KERN_ERR
++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(),
++ current->pid, current->comm);
++
++ if (task_stack_end_corrupted(current))
++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
++
++ debug_show_held_locks(current);
++ if (irqs_disabled())
++ print_irqtrace_events(current);
++#ifdef CONFIG_DEBUG_PREEMPT
++ if (!preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(current->preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
++ dump_stack();
++}
++EXPORT_SYMBOL(___might_sleep);
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static void normalize_task(struct rq *rq, struct task_struct *p)
++{
++ const struct sched_class *prev_class = p->sched_class;
++ struct sched_attr attr = {
++ .sched_policy = SCHED_NORMAL,
++ };
++ int old_prio = p->prio;
++ int queued;
++
++ queued = task_on_rq_queued(p);
++ if (queued)
++ dequeue_task(rq, p, 0);
++ __setscheduler(rq, p, &attr, false);
++ if (queued) {
++ enqueue_task(rq, p, 0);
++ resched_curr(rq);
++ }
++
++ check_class_changed(rq, p, prev_class, old_prio);
++}
++
++void normalize_rt_tasks(void)
++{
++ struct task_struct *g, *p;
++ unsigned long flags;
++ struct rq *rq;
++
++ read_lock(&tasklist_lock);
++ for_each_process_thread(g, p) {
++ /*
++ * Only normalize user tasks:
++ */
++ if (p->flags & PF_KTHREAD)
++ continue;
++
++ p->se.exec_start = 0;
++#ifdef CONFIG_SCHEDSTATS
++ p->se.statistics.wait_start = 0;
++ p->se.statistics.sleep_start = 0;
++ p->se.statistics.block_start = 0;
++#endif
++
++ if (!dl_task(p) && !rt_task(p)) {
++ /*
++ * Renice negative nice level userspace
++ * tasks back to 0:
++ */
++ if (task_nice(p) < 0)
++ set_user_nice(p, 0);
++ continue;
++ }
++
++ rq = task_rq_lock(p, &flags);
++ normalize_task(rq, p);
++ task_rq_unlock(rq, p, &flags);
++ }
++ read_unlock(&tasklist_lock);
++}
++
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given cpu.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++ return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * set_curr_task - set the current task for a given cpu.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a cpu in a non-blocking manner. This function
++ * must be called with all CPU's synchronized, and interrupts disabled, the
++ * and caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void set_curr_task(int cpu, struct task_struct *p)
++{
++ cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task_group_lock serializes the addition/removal of task groups */
++static DEFINE_SPINLOCK(task_group_lock);
++
++static void free_sched_group(struct task_group *tg)
++{
++ free_fair_sched_group(tg);
++ free_rt_sched_group(tg);
++ autogroup_free(tg);
++ kfree(tg);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++ struct task_group *tg;
++
++ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
++ if (!tg)
++ return ERR_PTR(-ENOMEM);
++
++ if (!alloc_fair_sched_group(tg, parent))
++ goto err;
++
++ if (!alloc_rt_sched_group(tg, parent))
++ goto err;
++
++ return tg;
++
++err:
++ free_sched_group(tg);
++ return ERR_PTR(-ENOMEM);
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&task_group_lock, flags);
++ list_add_rcu(&tg->list, &task_groups);
++
++ WARN_ON(!parent); /* root should already exist */
++
++ tg->parent = parent;
++ INIT_LIST_HEAD(&tg->children);
++ list_add_rcu(&tg->siblings, &parent->children);
++ spin_unlock_irqrestore(&task_group_lock, flags);
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void free_sched_group_rcu(struct rcu_head *rhp)
++{
++ /* now it should be safe to free those cfs_rqs */
++ free_sched_group(container_of(rhp, struct task_group, rcu));
++}
++
++/* Destroy runqueue etc associated with a task group */
++void sched_destroy_group(struct task_group *tg)
++{
++	/* wait for possible concurrent references to cfs_rqs to complete */
++ call_rcu(&tg->rcu, free_sched_group_rcu);
++}
++
++void sched_offline_group(struct task_group *tg)
++{
++ unsigned long flags;
++ int i;
++
++ /* end participation in shares distribution */
++ for_each_possible_cpu(i)
++ unregister_fair_sched_group(tg, i);
++
++ spin_lock_irqsave(&task_group_lock, flags);
++ list_del_rcu(&tg->list);
++ list_del_rcu(&tg->siblings);
++ spin_unlock_irqrestore(&task_group_lock, flags);
++}
++
++/* change task's runqueue when it moves between groups.
++ * The caller of this function should have put the task in its new group
++ * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
++ * reflect its new group.
++ */
++void sched_move_task(struct task_struct *tsk)
++{
++ struct task_group *tg;
++ int queued, running;
++ unsigned long flags;
++ struct rq *rq;
++
++ rq = task_rq_lock(tsk, &flags);
++
++ running = task_current(rq, tsk);
++ queued = task_on_rq_queued(tsk);
++
++ if (queued)
++ dequeue_task(rq, tsk, 0);
++ if (unlikely(running))
++ put_prev_task(rq, tsk);
++
++ /*
++ * All callers are synchronized by task_rq_lock(); we do not use RCU
++ * which is pointless here. Thus, we pass "true" to task_css_check()
++ * to prevent lockdep warnings.
++ */
++ tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
++ struct task_group, css);
++ tg = autogroup_task_group(tsk, tg);
++ tsk->sched_task_group = tg;
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ if (tsk->sched_class->task_move_group)
++ tsk->sched_class->task_move_group(tsk, queued);
++ else
++#endif
++ set_task_rq(tsk, task_cpu(tsk));
++
++ if (unlikely(running))
++ tsk->sched_class->set_curr_task(rq);
++ if (queued)
++ enqueue_task(rq, tsk, 0);
++
++ task_rq_unlock(rq, tsk, &flags);
++}
++#endif /* CONFIG_CGROUP_SCHED */
++
++#ifdef CONFIG_RT_GROUP_SCHED
++/*
++ * Ensure that the real time constraints are schedulable.
++ */
++static DEFINE_MUTEX(rt_constraints_mutex);
++
++/* Must be called with tasklist_lock held */
++static inline int tg_has_rt_tasks(struct task_group *tg)
++{
++ struct task_struct *g, *p;
++
++ /*
++ * Autogroups do not have RT tasks; see autogroup_create().
++ */
++ if (task_group_is_autogroup(tg))
++ return 0;
++
++ for_each_process_thread(g, p) {
++ if (rt_task(p) && task_group(p) == tg)
++ return 1;
++ }
++
++ return 0;
++}
++
++struct rt_schedulable_data {
++ struct task_group *tg;
++ u64 rt_period;
++ u64 rt_runtime;
++};
++
++static int tg_rt_schedulable(struct task_group *tg, void *data)
++{
++ struct rt_schedulable_data *d = data;
++ struct task_group *child;
++ unsigned long total, sum = 0;
++ u64 period, runtime;
++
++ period = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ runtime = tg->rt_bandwidth.rt_runtime;
++
++ if (tg == d->tg) {
++ period = d->rt_period;
++ runtime = d->rt_runtime;
++ }
++
++ /*
++ * Cannot have more runtime than the period.
++ */
++ if (runtime > period && runtime != RUNTIME_INF)
++ return -EINVAL;
++
++ /*
++ * Ensure we don't starve existing RT tasks.
++ */
++ if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
++ return -EBUSY;
++
++ total = to_ratio(period, runtime);
++
++ /*
++ * Nobody can have more than the global setting allows.
++ */
++ if (total > to_ratio(global_rt_period(), global_rt_runtime()))
++ return -EINVAL;
++
++ /*
++ * The sum of our children's runtime should not exceed our own.
++ */
++ list_for_each_entry_rcu(child, &tg->children, siblings) {
++ period = ktime_to_ns(child->rt_bandwidth.rt_period);
++ runtime = child->rt_bandwidth.rt_runtime;
++
++ if (child == d->tg) {
++ period = d->rt_period;
++ runtime = d->rt_runtime;
++ }
++
++ sum += to_ratio(period, runtime);
++ }
++
++ if (sum > total)
++ return -EINVAL;
++
++ return 0;
++}
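
tg_rt_schedulable() above admits a group's RT bandwidth only if the children's runtime/period ratios sum to no more than the group's own ratio (and nobody exceeds the global limit). A simplified standalone sketch of that admission test, assuming the kernel's to_ratio() is essentially a 20-bit fixed-point division of runtime by period:

/* Standalone sketch of the RT group admission test; not part of the patch.
 * to_ratio() here assumes the kernel's BW_SHIFT-style 20-bit fixed point. */
#include <stdint.h>
#include <stdio.h>

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (!period)
		return 0;
	return (runtime << 20) / period;
}

int main(void)
{
	uint64_t total = to_ratio(1000, 950);			  /* parent: 95% */
	uint64_t ok  = to_ratio(1000, 400) + to_ratio(1000, 500); /* 40% + 50%   */
	uint64_t bad = to_ratio(1000, 500) + to_ratio(1000, 600); /* 50% + 60%   */

	printf("%s\n", ok  > total ? "-EINVAL" : "schedulable");  /* schedulable */
	printf("%s\n", bad > total ? "-EINVAL" : "schedulable");  /* -EINVAL     */
	return 0;
}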
++
++static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
++{
++ int ret;
++
++ struct rt_schedulable_data data = {
++ .tg = tg,
++ .rt_period = period,
++ .rt_runtime = runtime,
++ };
++
++ rcu_read_lock();
++ ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
++ rcu_read_unlock();
++
++ return ret;
++}
++
++static int tg_set_rt_bandwidth(struct task_group *tg,
++ u64 rt_period, u64 rt_runtime)
++{
++ int i, err = 0;
++
++ /*
++	 * Disallowing the root group RT runtime is BAD; it would disallow the
++	 * kernel creating (and/or operating) RT threads.
++ */
++ if (tg == &root_task_group && rt_runtime == 0)
++ return -EINVAL;
++
++ /* No period doesn't make any sense. */
++ if (rt_period == 0)
++ return -EINVAL;
++
++ mutex_lock(&rt_constraints_mutex);
++ read_lock(&tasklist_lock);
++ err = __rt_schedulable(tg, rt_period, rt_runtime);
++ if (err)
++ goto unlock;
++
++ raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
++ tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
++ tg->rt_bandwidth.rt_runtime = rt_runtime;
++
++ for_each_possible_cpu(i) {
++ struct rt_rq *rt_rq = tg->rt_rq[i];
++
++ raw_spin_lock(&rt_rq->rt_runtime_lock);
++ rt_rq->rt_runtime = rt_runtime;
++ raw_spin_unlock(&rt_rq->rt_runtime_lock);
++ }
++ raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
++unlock:
++ read_unlock(&tasklist_lock);
++ mutex_unlock(&rt_constraints_mutex);
++
++ return err;
++}
++
++static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
++{
++ u64 rt_runtime, rt_period;
++
++ rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
++ if (rt_runtime_us < 0)
++ rt_runtime = RUNTIME_INF;
++
++ return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
++}
++
++static long sched_group_rt_runtime(struct task_group *tg)
++{
++ u64 rt_runtime_us;
++
++ if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
++ return -1;
++
++ rt_runtime_us = tg->rt_bandwidth.rt_runtime;
++ do_div(rt_runtime_us, NSEC_PER_USEC);
++ return rt_runtime_us;
++}
++
++static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
++{
++ u64 rt_runtime, rt_period;
++
++ rt_period = (u64)rt_period_us * NSEC_PER_USEC;
++ rt_runtime = tg->rt_bandwidth.rt_runtime;
++
++ return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
++}
++
++static long sched_group_rt_period(struct task_group *tg)
++{
++ u64 rt_period_us;
++
++ rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
++ do_div(rt_period_us, NSEC_PER_USEC);
++ return rt_period_us;
++}
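
The four helpers above all follow the same unit convention: the cgroup files cpu.rt_runtime_us / cpu.rt_period_us speak microseconds, the kernel stores nanoseconds, and a runtime of -1 maps to RUNTIME_INF (unlimited). A small sketch of just that conversion (helper names invented):

/* Sketch of the us <-> ns convention used by the rt_runtime/rt_period
 * helpers; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define RUNTIME_INF	((uint64_t)~0ULL)

static uint64_t runtime_us_to_ns(long us)
{
	return us < 0 ? RUNTIME_INF : (uint64_t)us * NSEC_PER_USEC;
}

static long runtime_ns_to_us(uint64_t ns)
{
	return ns == RUNTIME_INF ? -1 : (long)(ns / NSEC_PER_USEC);
}

int main(void)
{
	printf("%ld\n", runtime_ns_to_us(runtime_us_to_ns(950000))); /* 950000 */
	printf("%ld\n", runtime_ns_to_us(runtime_us_to_ns(-1)));     /* -1     */
	return 0;
}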
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int sched_rt_global_constraints(void)
++{
++ int ret = 0;
++
++ mutex_lock(&rt_constraints_mutex);
++ read_lock(&tasklist_lock);
++ ret = __rt_schedulable(NULL, 0, 0);
++ read_unlock(&tasklist_lock);
++ mutex_unlock(&rt_constraints_mutex);
++
++ return ret;
++}
++
++static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
++{
++ /* Don't accept realtime tasks when there is no way for them to run */
++ if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
++ return 0;
++
++ return 1;
++}
++
++#else /* !CONFIG_RT_GROUP_SCHED */
++static int sched_rt_global_constraints(void)
++{
++ unsigned long flags;
++ int i, ret = 0;
++
++ raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
++ for_each_possible_cpu(i) {
++ struct rt_rq *rt_rq = &cpu_rq(i)->rt;
++
++ raw_spin_lock(&rt_rq->rt_runtime_lock);
++ rt_rq->rt_runtime = global_rt_runtime();
++ raw_spin_unlock(&rt_rq->rt_runtime_lock);
++ }
++ raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
++
++ return ret;
++}
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++static int sched_dl_global_validate(void)
++{
++ u64 runtime = global_rt_runtime();
++ u64 period = global_rt_period();
++ u64 new_bw = to_ratio(period, runtime);
++ struct dl_bw *dl_b;
++ int cpu, ret = 0;
++ unsigned long flags;
++
++ /*
++ * Here we want to check the bandwidth not being set to some
++ * value smaller than the currently allocated bandwidth in
++ * any of the root_domains.
++ *
++	 * FIXME: Cycling on all the CPUs is overkill, but simpler than
++ * cycling on root_domains... Discussion on different/better
++ * solutions is welcome!
++ */
++ for_each_possible_cpu(cpu) {
++ rcu_read_lock_sched();
++ dl_b = dl_bw_of(cpu);
++
++ raw_spin_lock_irqsave(&dl_b->lock, flags);
++ if (new_bw < dl_b->total_bw)
++ ret = -EBUSY;
++ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
++
++ rcu_read_unlock_sched();
++
++ if (ret)
++ break;
++ }
++
++ return ret;
++}
++
++static void sched_dl_do_global(void)
++{
++ u64 new_bw = -1;
++ struct dl_bw *dl_b;
++ int cpu;
++ unsigned long flags;
++
++ def_dl_bandwidth.dl_period = global_rt_period();
++ def_dl_bandwidth.dl_runtime = global_rt_runtime();
++
++ if (global_rt_runtime() != RUNTIME_INF)
++ new_bw = to_ratio(global_rt_period(), global_rt_runtime());
++
++ /*
++ * FIXME: As above...
++ */
++ for_each_possible_cpu(cpu) {
++ rcu_read_lock_sched();
++ dl_b = dl_bw_of(cpu);
++
++ raw_spin_lock_irqsave(&dl_b->lock, flags);
++ dl_b->bw = new_bw;
++ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
++
++ rcu_read_unlock_sched();
++ }
++}
++
++static int sched_rt_global_validate(void)
++{
++ if (sysctl_sched_rt_period <= 0)
++ return -EINVAL;
++
++ if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
++ (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
++ return -EINVAL;
++
++ return 0;
++}
++
++static void sched_rt_do_global(void)
++{
++ def_rt_bandwidth.rt_runtime = global_rt_runtime();
++ def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
++}
++
++int sched_rt_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int old_period, old_runtime;
++ static DEFINE_MUTEX(mutex);
++ int ret;
++
++ mutex_lock(&mutex);
++ old_period = sysctl_sched_rt_period;
++ old_runtime = sysctl_sched_rt_runtime;
++
++ ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++ if (!ret && write) {
++ ret = sched_rt_global_validate();
++ if (ret)
++ goto undo;
++
++ ret = sched_dl_global_validate();
++ if (ret)
++ goto undo;
++
++ ret = sched_rt_global_constraints();
++ if (ret)
++ goto undo;
++
++ sched_rt_do_global();
++ sched_dl_do_global();
++ }
++ if (0) {
++undo:
++ sysctl_sched_rt_period = old_period;
++ sysctl_sched_rt_runtime = old_runtime;
++ }
++ mutex_unlock(&mutex);
++
++ return ret;
++}
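
sched_rt_handler() above lets proc_dointvec() overwrite the sysctl values first, then validates them, jumping into the otherwise dead "if (0) { undo: ... }" block to restore the old values when any check fails. A minimal sketch of that write-then-validate-or-roll-back idiom (everything here is invented except the control flow):

/* Sketch of the "if (0) { undo: }" roll-back idiom; not part of the patch. */
#include <stdio.h>

static int value = 100;

static int validate(int v)
{
	return v > 0 ? 0 : -1;
}

static int set_value(int new_val)
{
	int old = value;
	int ret;

	value = new_val;		/* write first, as proc_dointvec() does */
	ret = validate(value);
	if (ret)
		goto undo;
	return 0;

	if (0) {
undo:
		value = old;		/* restore on validation failure */
	}
	return ret;
}

int main(void)
{
	int ret;

	ret = set_value(-5);
	printf("ret=%d value=%d\n", ret, value);	/* ret=-1 value=100 */
	ret = set_value(42);
	printf("ret=%d value=%d\n", ret, value);	/* ret=0 value=42   */
	return 0;
}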
++
++int sched_rr_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int ret;
++ static DEFINE_MUTEX(mutex);
++
++ mutex_lock(&mutex);
++ ret = proc_dointvec(table, write, buffer, lenp, ppos);
++ /* make sure that internally we keep jiffies */
++ /* also, writing zero resets timeslice to default */
++ if (!ret && write) {
++ sched_rr_timeslice = sched_rr_timeslice <= 0 ?
++ RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
++ }
++ mutex_unlock(&mutex);
++ return ret;
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct task_group *parent = css_tg(parent_css);
++ struct task_group *tg;
++
++ if (!parent) {
++ /* This is early initialization for the top cgroup */
++ return &root_task_group.css;
++ }
++
++ tg = sched_create_group(parent);
++ if (IS_ERR(tg))
++ return ERR_PTR(-ENOMEM);
++
++ return &tg->css;
++}
++
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++ struct task_group *parent = css_tg(css->parent);
++
++ if (parent)
++ sched_online_group(tg, parent);
++ return 0;
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_destroy_group(tg);
++}
++
++static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
++{
++ struct task_group *tg = css_tg(css);
++
++ sched_offline_group(tg);
++}
++
++static void cpu_cgroup_fork(struct task_struct *task)
++{
++ sched_move_task(task);
++}
++
++static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++
++ cgroup_taskset_for_each(task, tset) {
++#ifdef CONFIG_RT_GROUP_SCHED
++ if (!sched_rt_can_attach(css_tg(css), task))
++ return -EINVAL;
++#else
++ /* We don't support RT-tasks being in separate groups */
++ if (task->sched_class != &fair_sched_class)
++ return -EINVAL;
++#endif
++ }
++ return 0;
++}
++
++static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++
++ cgroup_taskset_for_each(task, tset)
++ sched_move_task(task);
++}
++
++static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
++ struct cgroup_subsys_state *old_css,
++ struct task_struct *task)
++{
++ /*
++ * cgroup_exit() is called in the copy_process() failure path.
++	 * Ignore this case since the task hasn't run yet; this avoids
++	 * trying to poke a half-freed task state from generic code.
++ */
++ if (!(task->flags & PF_EXITING))
++ return;
++
++ sched_move_task(task);
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++ struct cftype *cftype, u64 shareval)
++{
++ return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ struct task_group *tg = css_tg(css);
++
++ return (u64) scale_load_down(tg->shares);
++}
++
++#ifdef CONFIG_CFS_BANDWIDTH
++static DEFINE_MUTEX(cfs_constraints_mutex);
++
++const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
++const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
++
++static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
++
++static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
++{
++ int i, ret = 0, runtime_enabled, runtime_was_enabled;
++ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
++
++ if (tg == &root_task_group)
++ return -EINVAL;
++
++ /*
++	 * Ensure we have at least some amount of bandwidth every period. This is
++ * to prevent reaching a state of large arrears when throttled via
++ * entity_tick() resulting in prolonged exit starvation.
++ */
++ if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
++ return -EINVAL;
++
++ /*
++	 * Likewise, bound things on the other side by preventing insane quota
++ * periods. This also allows us to normalize in computing quota
++ * feasibility.
++ */
++ if (period > max_cfs_quota_period)
++ return -EINVAL;
++
++ /*
++ * Prevent race between setting of cfs_rq->runtime_enabled and
++ * unthrottle_offline_cfs_rqs().
++ */
++ get_online_cpus();
++ mutex_lock(&cfs_constraints_mutex);
++ ret = __cfs_schedulable(tg, period, quota);
++ if (ret)
++ goto out_unlock;
++
++ runtime_enabled = quota != RUNTIME_INF;
++ runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
++ /*
++ * If we need to toggle cfs_bandwidth_used, off->on must occur
++ * before making related changes, and on->off must occur afterwards
++ */
++ if (runtime_enabled && !runtime_was_enabled)
++ cfs_bandwidth_usage_inc();
++ raw_spin_lock_irq(&cfs_b->lock);
++ cfs_b->period = ns_to_ktime(period);
++ cfs_b->quota = quota;
++
++ __refill_cfs_bandwidth_runtime(cfs_b);
++ /* restart the period timer (if active) to handle new period expiry */
++ if (runtime_enabled && cfs_b->timer_active) {
++ /* force a reprogram */
++ __start_cfs_bandwidth(cfs_b, true);
++ }
++ raw_spin_unlock_irq(&cfs_b->lock);
++
++ for_each_online_cpu(i) {
++ struct cfs_rq *cfs_rq = tg->cfs_rq[i];
++ struct rq *rq = cfs_rq->rq;
++
++ raw_spin_lock_irq(&rq->lock);
++ cfs_rq->runtime_enabled = runtime_enabled;
++ cfs_rq->runtime_remaining = 0;
++
++ if (cfs_rq->throttled)
++ unthrottle_cfs_rq(cfs_rq);
++ raw_spin_unlock_irq(&rq->lock);
++ }
++ if (runtime_was_enabled && !runtime_enabled)
++ cfs_bandwidth_usage_dec();
++out_unlock:
++ mutex_unlock(&cfs_constraints_mutex);
++ put_online_cpus();
++
++ return ret;
++}
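
tg_set_cfs_bandwidth() above bounds both values before touching anything: quota and period must each be at least min_cfs_quota_period (1 ms) and the period at most max_cfs_quota_period (1 s). A standalone sketch of just those bounds, reusing the constants defined above (the -1 return stands in for -EINVAL):

/* Sketch of the quota/period bounds checked by tg_set_cfs_bandwidth();
 * not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_MSEC	1000000ULL

static const uint64_t max_cfs_quota_period = 1 * NSEC_PER_SEC;	 /* 1s  */
static const uint64_t min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */

static int check_cfs_bandwidth(uint64_t period, uint64_t quota)
{
	if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
		return -1;				/* -EINVAL */
	if (period > max_cfs_quota_period)
		return -1;				/* -EINVAL */
	return 0;
}

int main(void)
{
	printf("%d\n", check_cfs_bandwidth(100 * NSEC_PER_MSEC,
					   50 * NSEC_PER_MSEC));  /* 0  */
	printf("%d\n", check_cfs_bandwidth(2 * NSEC_PER_SEC,
					   50 * NSEC_PER_MSEC));  /* -1 */
	return 0;
}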
++
++int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
++{
++ u64 quota, period;
++
++ period = ktime_to_ns(tg->cfs_bandwidth.period);
++ if (cfs_quota_us < 0)
++ quota = RUNTIME_INF;
++ else
++ quota = (u64)cfs_quota_us * NSEC_PER_USEC;
++
++ return tg_set_cfs_bandwidth(tg, period, quota);
++}
++
++long tg_get_cfs_quota(struct task_group *tg)
++{
++ u64 quota_us;
++
++ if (tg->cfs_bandwidth.quota == RUNTIME_INF)
++ return -1;
++
++ quota_us = tg->cfs_bandwidth.quota;
++ do_div(quota_us, NSEC_PER_USEC);
++
++ return quota_us;
++}
++
++int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
++{
++ u64 quota, period;
++
++ period = (u64)cfs_period_us * NSEC_PER_USEC;
++ quota = tg->cfs_bandwidth.quota;
++
++ return tg_set_cfs_bandwidth(tg, period, quota);
++}
++
++long tg_get_cfs_period(struct task_group *tg)
++{
++ u64 cfs_period_us;
++
++ cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
++ do_div(cfs_period_us, NSEC_PER_USEC);
++
++ return cfs_period_us;
++}
++
++static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return tg_get_cfs_quota(css_tg(css));
++}
++
++static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
++ struct cftype *cftype, s64 cfs_quota_us)
++{
++ return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
++}
++
++static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return tg_get_cfs_period(css_tg(css));
++}
++
++static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
++ struct cftype *cftype, u64 cfs_period_us)
++{
++ return tg_set_cfs_period(css_tg(css), cfs_period_us);
++}
++
++struct cfs_schedulable_data {
++ struct task_group *tg;
++ u64 period, quota;
++};
++
++/*
++ * normalize group quota/period to be quota/max_period
++ * note: units are usecs
++ */
++static u64 normalize_cfs_quota(struct task_group *tg,
++ struct cfs_schedulable_data *d)
++{
++ u64 quota, period;
++
++ if (tg == d->tg) {
++ period = d->period;
++ quota = d->quota;
++ } else {
++ period = tg_get_cfs_period(tg);
++ quota = tg_get_cfs_quota(tg);
++ }
++
++ /* note: these should typically be equivalent */
++ if (quota == RUNTIME_INF || quota == -1)
++ return RUNTIME_INF;
++
++ return to_ratio(period, quota);
++}
++
++static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
++{
++ struct cfs_schedulable_data *d = data;
++ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
++ s64 quota = 0, parent_quota = -1;
++
++ if (!tg->parent) {
++ quota = RUNTIME_INF;
++ } else {
++ struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
++
++ quota = normalize_cfs_quota(tg, d);
++ parent_quota = parent_b->hierarchical_quota;
++
++ /*
++ * ensure max(child_quota) <= parent_quota, inherit when no
++ * limit is set
++ */
++ if (quota == RUNTIME_INF)
++ quota = parent_quota;
++ else if (parent_quota != RUNTIME_INF && quota > parent_quota)
++ return -EINVAL;
++ }
++ cfs_b->hierarchical_quota = quota;
++
++ return 0;
++}
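
tg_cfs_schedulable_down() above enforces the hierarchical quota rule: a group with no limit of its own inherits its parent's effective quota, and a group with a limit may not exceed the parent's. A small sketch of that rule in isolation (names invented; -1 plays the role of RUNTIME_INF):

/* Sketch of the hierarchical CFS quota rule; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define QUOTA_INF	((int64_t)-1)

static int64_t effective_quota(int64_t parent_quota, int64_t child_quota,
			       int *err)
{
	*err = 0;
	if (child_quota == QUOTA_INF)
		return parent_quota;		/* inherit the parent's limit */
	if (parent_quota != QUOTA_INF && child_quota > parent_quota)
		*err = -1;			/* -EINVAL in the kernel      */
	return child_quota;
}

int main(void)
{
	int64_t q;
	int err;

	q = effective_quota(500, QUOTA_INF, &err);
	printf("quota=%lld err=%d\n", (long long)q, err);  /* quota=500 err=0  */
	q = effective_quota(500, 800, &err);
	printf("quota=%lld err=%d\n", (long long)q, err);  /* quota=800 err=-1 */
	return 0;
}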
++
++static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
++{
++ int ret;
++ struct cfs_schedulable_data data = {
++ .tg = tg,
++ .period = period,
++ .quota = quota,
++ };
++
++ if (quota != RUNTIME_INF) {
++ do_div(data.period, NSEC_PER_USEC);
++ do_div(data.quota, NSEC_PER_USEC);
++ }
++
++ rcu_read_lock();
++ ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
++ rcu_read_unlock();
++
++ return ret;
++}
++
++static int cpu_stats_show(struct seq_file *sf, void *v)
++{
++ struct task_group *tg = css_tg(seq_css(sf));
++ struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
++
++ seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
++ seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
++ seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
++
++ return 0;
++}
++#endif /* CONFIG_CFS_BANDWIDTH */
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
++ struct cftype *cft, s64 val)
++{
++ return sched_group_set_rt_runtime(css_tg(css), val);
++}
++
++static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return sched_group_rt_runtime(css_tg(css));
++}
++
++static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
++ struct cftype *cftype, u64 rt_period_us)
++{
++ return sched_group_set_rt_period(css_tg(css), rt_period_us);
++}
++
++static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
++ struct cftype *cft)
++{
++ return sched_group_rt_period(css_tg(css));
++}
++#endif /* CONFIG_RT_GROUP_SCHED */
++
++static struct cftype cpu_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ {
++ .name = "shares",
++ .read_u64 = cpu_shares_read_u64,
++ .write_u64 = cpu_shares_write_u64,
++ },
++#endif
++#ifdef CONFIG_CFS_BANDWIDTH
++ {
++ .name = "cfs_quota_us",
++ .read_s64 = cpu_cfs_quota_read_s64,
++ .write_s64 = cpu_cfs_quota_write_s64,
++ },
++ {
++ .name = "cfs_period_us",
++ .read_u64 = cpu_cfs_period_read_u64,
++ .write_u64 = cpu_cfs_period_write_u64,
++ },
++ {
++ .name = "stat",
++ .seq_show = cpu_stats_show,
++ },
++#endif
++#ifdef CONFIG_RT_GROUP_SCHED
++ {
++ .name = "rt_runtime_us",
++ .read_s64 = cpu_rt_runtime_read,
++ .write_s64 = cpu_rt_runtime_write,
++ },
++ {
++ .name = "rt_period_us",
++ .read_u64 = cpu_rt_period_read_uint,
++ .write_u64 = cpu_rt_period_write_uint,
++ },
++#endif
++ { } /* terminate */
++};
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++ .css_alloc = cpu_cgroup_css_alloc,
++ .css_free = cpu_cgroup_css_free,
++ .css_online = cpu_cgroup_css_online,
++ .css_offline = cpu_cgroup_css_offline,
++ .fork = cpu_cgroup_fork,
++ .can_attach = cpu_cgroup_can_attach,
++ .attach = cpu_cgroup_attach,
++ .exit = cpu_cgroup_exit,
++ .legacy_cftypes = cpu_files,
++ .early_init = 1,
++};
++
++#endif /* CONFIG_CGROUP_SCHED */
++
++void dump_cpu_task(int cpu)
++{
++ pr_info("Task dump for CPU %d:\n", cpu);
++ sched_show_task(cpu_curr(cpu));
++}
+diff -Nur linux-4.1.10.orig/kernel/sched/cputime.c linux-4.1.10/kernel/sched/cputime.c
+--- linux-4.1.10.orig/kernel/sched/cputime.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/cputime.c 2015-10-07 18:00:08.000000000 +0200
@@ -675,37 +675,45 @@
void vtime_account_system(struct task_struct *tsk)
@@ -18528,9 +42798,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/cputime.c linux-4.1.6/kernel/sched/cputi
}
-diff -Nur linux-4.1.6.orig/kernel/sched/deadline.c linux-4.1.6/kernel/sched/deadline.c
---- linux-4.1.6.orig/kernel/sched/deadline.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/deadline.c 2015-09-08 23:49:08.109866942 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/deadline.c linux-4.1.10/kernel/sched/deadline.c
+--- linux-4.1.10.orig/kernel/sched/deadline.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/deadline.c 2015-10-07 18:00:08.000000000 +0200
@@ -637,6 +637,7 @@
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -18539,9 +42809,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/deadline.c linux-4.1.6/kernel/sched/dead
}
static
-diff -Nur linux-4.1.6.orig/kernel/sched/debug.c linux-4.1.6/kernel/sched/debug.c
---- linux-4.1.6.orig/kernel/sched/debug.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/debug.c 2015-09-08 23:49:08.109866942 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/debug.c linux-4.1.10/kernel/sched/debug.c
+--- linux-4.1.10.orig/kernel/sched/debug.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/debug.c 2015-10-07 18:00:08.000000000 +0200
@@ -260,6 +260,9 @@
P(rt_throttled);
PN(rt_time);
@@ -18563,9 +42833,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/debug.c linux-4.1.6/kernel/sched/debug.c
#undef PN
#undef __PN
#undef P
-diff -Nur linux-4.1.6.orig/kernel/sched/fair.c linux-4.1.6/kernel/sched/fair.c
---- linux-4.1.6.orig/kernel/sched/fair.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/fair.c 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/fair.c linux-4.1.10/kernel/sched/fair.c
+--- linux-4.1.10.orig/kernel/sched/fair.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/fair.c 2015-10-07 18:00:08.000000000 +0200
@@ -3201,7 +3201,7 @@
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
@@ -18638,9 +42908,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/fair.c linux-4.1.6/kernel/sched/fair.c
} else
check_preempt_curr(rq, p, 0);
}
-diff -Nur linux-4.1.6.orig/kernel/sched/features.h linux-4.1.6/kernel/sched/features.h
---- linux-4.1.6.orig/kernel/sched/features.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/features.h 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/features.h linux-4.1.10/kernel/sched/features.h
+--- linux-4.1.10.orig/kernel/sched/features.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/features.h 2015-10-07 18:00:08.000000000 +0200
@@ -50,11 +50,19 @@
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
@@ -18661,9 +42931,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/features.h linux-4.1.6/kernel/sched/feat
#ifdef HAVE_RT_PUSH_IPI
/*
-diff -Nur linux-4.1.6.orig/kernel/sched/Makefile linux-4.1.6/kernel/sched/Makefile
---- linux-4.1.6.orig/kernel/sched/Makefile 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/Makefile 2015-09-08 23:49:08.105867384 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/Makefile linux-4.1.10/kernel/sched/Makefile
+--- linux-4.1.10.orig/kernel/sched/Makefile 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/Makefile 2015-10-07 18:00:08.000000000 +0200
@@ -13,7 +13,7 @@
obj-y += core.o proc.o clock.o cputime.o
@@ -18673,9 +42943,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/Makefile linux-4.1.6/kernel/sched/Makefi
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
-diff -Nur linux-4.1.6.orig/kernel/sched/rt.c linux-4.1.6/kernel/sched/rt.c
---- linux-4.1.6.orig/kernel/sched/rt.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/rt.c 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/rt.c linux-4.1.10/kernel/sched/rt.c
+--- linux-4.1.10.orig/kernel/sched/rt.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/rt.c 2015-10-07 18:00:08.000000000 +0200
@@ -44,6 +44,7 @@
hrtimer_init(&rt_b->rt_period_timer,
@@ -18692,9 +42962,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/rt.c linux-4.1.6/kernel/sched/rt.c
#endif
#endif /* CONFIG_SMP */
/* We start is dequeued state, because no RT tasks are queued */
-diff -Nur linux-4.1.6.orig/kernel/sched/sched.h linux-4.1.6/kernel/sched/sched.h
---- linux-4.1.6.orig/kernel/sched/sched.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/sched/sched.h 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/sched.h linux-4.1.10/kernel/sched/sched.h
+--- linux-4.1.10.orig/kernel/sched/sched.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/sched/sched.h 2015-10-07 18:00:08.000000000 +0200
@@ -1092,6 +1092,7 @@
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
@@ -18719,9 +42989,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/sched.h linux-4.1.6/kernel/sched/sched.h
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-diff -Nur linux-4.1.6.orig/kernel/sched/wait-simple.c linux-4.1.6/kernel/sched/wait-simple.c
---- linux-4.1.6.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/kernel/sched/wait-simple.c 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/wait-simple.c linux-4.1.10/kernel/sched/wait-simple.c
+--- linux-4.1.10.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/kernel/sched/wait-simple.c 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,115 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
@@ -18838,9 +43108,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/wait-simple.c linux-4.1.6/kernel/sched/w
+ return woken;
+}
+EXPORT_SYMBOL(__swait_wake);
-diff -Nur linux-4.1.6.orig/kernel/sched/work-simple.c linux-4.1.6/kernel/sched/work-simple.c
---- linux-4.1.6.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/kernel/sched/work-simple.c 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/sched/work-simple.c linux-4.1.10/kernel/sched/work-simple.c
+--- linux-4.1.10.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/kernel/sched/work-simple.c 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
@@ -19014,9 +43284,9 @@ diff -Nur linux-4.1.6.orig/kernel/sched/work-simple.c linux-4.1.6/kernel/sched/w
+ mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
-diff -Nur linux-4.1.6.orig/kernel/signal.c linux-4.1.6/kernel/signal.c
---- linux-4.1.6.orig/kernel/signal.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/signal.c 2015-09-08 23:49:08.113866499 +0200
+diff -Nur linux-4.1.10.orig/kernel/signal.c linux-4.1.10/kernel/signal.c
+--- linux-4.1.10.orig/kernel/signal.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/signal.c 2015-10-07 18:00:08.000000000 +0200
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/init.h>
@@ -19261,9 +43531,9 @@ diff -Nur linux-4.1.6.orig/kernel/signal.c linux-4.1.6/kernel/signal.c
freezable_schedule();
} else {
/*
-diff -Nur linux-4.1.6.orig/kernel/softirq.c linux-4.1.6/kernel/softirq.c
---- linux-4.1.6.orig/kernel/softirq.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/softirq.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/softirq.c linux-4.1.10/kernel/softirq.c
+--- linux-4.1.10.orig/kernel/softirq.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/softirq.c 2015-10-07 18:00:08.000000000 +0200
@@ -21,10 +21,12 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -20150,9 +44420,9 @@ diff -Nur linux-4.1.6.orig/kernel/softirq.c linux-4.1.6/kernel/softirq.c
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
-diff -Nur linux-4.1.6.orig/kernel/stop_machine.c linux-4.1.6/kernel/stop_machine.c
---- linux-4.1.6.orig/kernel/stop_machine.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/stop_machine.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/stop_machine.c linux-4.1.10/kernel/stop_machine.c
+--- linux-4.1.10.orig/kernel/stop_machine.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/stop_machine.c 2015-10-07 18:00:08.000000000 +0200
@@ -30,12 +30,12 @@
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
@@ -20413,9 +44683,9 @@ diff -Nur linux-4.1.6.orig/kernel/stop_machine.c linux-4.1.6/kernel/stop_machine
cpu_relax();
mutex_unlock(&stop_cpus_mutex);
-diff -Nur linux-4.1.6.orig/kernel/time/hrtimer.c linux-4.1.6/kernel/time/hrtimer.c
---- linux-4.1.6.orig/kernel/time/hrtimer.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/hrtimer.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/hrtimer.c linux-4.1.10/kernel/time/hrtimer.c
+--- linux-4.1.10.orig/kernel/time/hrtimer.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/hrtimer.c 2015-10-07 18:00:08.000000000 +0200
@@ -48,11 +48,13 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
@@ -21020,9 +45290,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/hrtimer.c linux-4.1.6/kernel/time/hrtimer
}
/**
-diff -Nur linux-4.1.6.orig/kernel/time/itimer.c linux-4.1.6/kernel/time/itimer.c
---- linux-4.1.6.orig/kernel/time/itimer.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/itimer.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/itimer.c linux-4.1.10/kernel/time/itimer.c
+--- linux-4.1.10.orig/kernel/time/itimer.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/itimer.c 2015-10-07 18:00:08.000000000 +0200
@@ -213,6 +213,7 @@
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
@@ -21031,9 +45301,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/itimer.c linux-4.1.6/kernel/time/itimer.c
goto again;
}
expires = timeval_to_ktime(value->it_value);
-diff -Nur linux-4.1.6.orig/kernel/time/jiffies.c linux-4.1.6/kernel/time/jiffies.c
---- linux-4.1.6.orig/kernel/time/jiffies.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/jiffies.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/jiffies.c linux-4.1.10/kernel/time/jiffies.c
+--- linux-4.1.10.orig/kernel/time/jiffies.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/jiffies.c 2015-10-07 18:00:08.000000000 +0200
@@ -74,7 +74,8 @@
.max_cycles = 10,
};
@@ -21056,9 +45326,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/jiffies.c linux-4.1.6/kernel/time/jiffies
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
-diff -Nur linux-4.1.6.orig/kernel/time/ntp.c linux-4.1.6/kernel/time/ntp.c
---- linux-4.1.6.orig/kernel/time/ntp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/ntp.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/ntp.c linux-4.1.10/kernel/time/ntp.c
+--- linux-4.1.10.orig/kernel/time/ntp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/ntp.c 2015-10-07 18:00:08.000000000 +0200
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
@@ -21120,9 +45390,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/ntp.c linux-4.1.6/kernel/time/ntp.c
#else
void ntp_notify_cmos_timer(void) { }
-diff -Nur linux-4.1.6.orig/kernel/time/posix-cpu-timers.c linux-4.1.6/kernel/time/posix-cpu-timers.c
---- linux-4.1.6.orig/kernel/time/posix-cpu-timers.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/posix-cpu-timers.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/posix-cpu-timers.c linux-4.1.10/kernel/time/posix-cpu-timers.c
+--- linux-4.1.10.orig/kernel/time/posix-cpu-timers.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/posix-cpu-timers.c 2015-10-07 18:00:08.000000000 +0200
@@ -3,6 +3,7 @@
*/
@@ -21370,9 +45640,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/posix-cpu-timers.c linux-4.1.6/kernel/tim
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
-diff -Nur linux-4.1.6.orig/kernel/time/posix-timers.c linux-4.1.6/kernel/time/posix-timers.c
---- linux-4.1.6.orig/kernel/time/posix-timers.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/posix-timers.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/posix-timers.c linux-4.1.10/kernel/time/posix-timers.c
+--- linux-4.1.10.orig/kernel/time/posix-timers.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/posix-timers.c 2015-10-07 18:00:08.000000000 +0200
@@ -499,6 +499,7 @@
static struct pid *good_sigevent(sigevent_t * event)
{
@@ -21468,9 +45738,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/posix-timers.c linux-4.1.6/kernel/time/po
goto retry_delete;
}
list_del(&timer->list);
-diff -Nur linux-4.1.6.orig/kernel/time/tick-common.c linux-4.1.6/kernel/time/tick-common.c
---- linux-4.1.6.orig/kernel/time/tick-common.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/tick-common.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/tick-common.c linux-4.1.10/kernel/time/tick-common.c
+--- linux-4.1.10.orig/kernel/time/tick-common.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/tick-common.c 2015-10-07 18:00:08.000000000 +0200
@@ -78,13 +78,15 @@
static void tick_periodic(int cpu)
{
@@ -21501,9 +45771,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/tick-common.c linux-4.1.6/kernel/time/tic
clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
-diff -Nur linux-4.1.6.orig/kernel/time/tick-sched.c linux-4.1.6/kernel/time/tick-sched.c
---- linux-4.1.6.orig/kernel/time/tick-sched.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/tick-sched.c 2015-09-08 23:49:08.117866054 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/tick-sched.c linux-4.1.10/kernel/time/tick-sched.c
+--- linux-4.1.10.orig/kernel/time/tick-sched.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/tick-sched.c 2015-10-07 18:00:08.000000000 +0200
@@ -62,7 +62,8 @@
return;
@@ -21603,9 +45873,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/tick-sched.c linux-4.1.6/kernel/time/tick
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
-diff -Nur linux-4.1.6.orig/kernel/time/timekeeping.c linux-4.1.6/kernel/time/timekeeping.c
---- linux-4.1.6.orig/kernel/time/timekeeping.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/timekeeping.c 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/timekeeping.c linux-4.1.10/kernel/time/timekeeping.c
+--- linux-4.1.10.orig/kernel/time/timekeeping.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/timekeeping.c 2015-10-07 18:00:08.000000000 +0200
@@ -2065,8 +2065,10 @@
*/
void xtime_update(unsigned long ticks)
@@ -21619,9 +45889,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/timekeeping.c linux-4.1.6/kernel/time/tim
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
-diff -Nur linux-4.1.6.orig/kernel/time/timekeeping.h linux-4.1.6/kernel/time/timekeeping.h
---- linux-4.1.6.orig/kernel/time/timekeeping.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/timekeeping.h 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/timekeeping.h linux-4.1.10/kernel/time/timekeeping.h
+--- linux-4.1.10.orig/kernel/time/timekeeping.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/timekeeping.h 2015-10-07 18:00:08.000000000 +0200
@@ -22,7 +22,8 @@
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
@@ -21632,9 +45902,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/timekeeping.h linux-4.1.6/kernel/time/tim
#define CS_NAME_LEN 32
-diff -Nur linux-4.1.6.orig/kernel/time/timer.c linux-4.1.6/kernel/time/timer.c
---- linux-4.1.6.orig/kernel/time/timer.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/time/timer.c 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/time/timer.c linux-4.1.10/kernel/time/timer.c
+--- linux-4.1.10.orig/kernel/time/timer.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/time/timer.c 2015-10-07 18:00:08.000000000 +0200
@@ -78,6 +78,9 @@
struct tvec_base {
spinlock_t lock;
@@ -21836,9 +46106,9 @@ diff -Nur linux-4.1.6.orig/kernel/time/timer.c linux-4.1.6/kernel/time/timer.c
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
-diff -Nur linux-4.1.6.orig/kernel/trace/Kconfig linux-4.1.6/kernel/trace/Kconfig
---- linux-4.1.6.orig/kernel/trace/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/Kconfig 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/Kconfig linux-4.1.10/kernel/trace/Kconfig
+--- linux-4.1.10.orig/kernel/trace/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -187,6 +187,24 @@
enabled. This option and the preempt-off timing option can be
used together or separately.)
@@ -21964,9 +46234,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/Kconfig linux-4.1.6/kernel/trace/Kconfig
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
-diff -Nur linux-4.1.6.orig/kernel/trace/latency_hist.c linux-4.1.6/kernel/trace/latency_hist.c
---- linux-4.1.6.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.1.6/kernel/trace/latency_hist.c 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/latency_hist.c linux-4.1.10/kernel/trace/latency_hist.c
+--- linux-4.1.10.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/kernel/trace/latency_hist.c 2015-10-07 18:00:08.000000000 +0200
@@ -0,0 +1,1178 @@
+/*
+ * kernel/trace/latency_hist.c
@@ -23146,9 +47416,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/latency_hist.c linux-4.1.6/kernel/trace/
+}
+
+device_initcall(latency_hist_init);
-diff -Nur linux-4.1.6.orig/kernel/trace/Makefile linux-4.1.6/kernel/trace/Makefile
---- linux-4.1.6.orig/kernel/trace/Makefile 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/Makefile 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/Makefile linux-4.1.10/kernel/trace/Makefile
+--- linux-4.1.10.orig/kernel/trace/Makefile 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/Makefile 2015-10-07 18:00:08.000000000 +0200
@@ -36,6 +36,10 @@
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
@@ -23160,9 +47430,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/Makefile linux-4.1.6/kernel/trace/Makefi
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-diff -Nur linux-4.1.6.orig/kernel/trace/trace.c linux-4.1.6/kernel/trace/trace.c
---- linux-4.1.6.orig/kernel/trace/trace.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/trace.c 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/trace.c linux-4.1.10/kernel/trace/trace.c
+--- linux-4.1.10.orig/kernel/trace/trace.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/trace.c 2015-10-07 18:00:08.000000000 +0200
@@ -1630,6 +1630,7 @@
struct task_struct *tsk = current;
@@ -23230,9 +47500,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace.c linux-4.1.6/kernel/trace/trace.c
}
void
-diff -Nur linux-4.1.6.orig/kernel/trace/trace_events.c linux-4.1.6/kernel/trace/trace_events.c
---- linux-4.1.6.orig/kernel/trace/trace_events.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/trace_events.c 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/trace_events.c linux-4.1.10/kernel/trace/trace_events.c
+--- linux-4.1.10.orig/kernel/trace/trace_events.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/trace_events.c 2015-10-07 18:00:08.000000000 +0200
@@ -162,6 +162,8 @@
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
@@ -23242,9 +47512,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace_events.c linux-4.1.6/kernel/trace/
return ret;
}
-diff -Nur linux-4.1.6.orig/kernel/trace/trace.h linux-4.1.6/kernel/trace/trace.h
---- linux-4.1.6.orig/kernel/trace/trace.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/trace.h 2015-09-08 23:49:08.125865168 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/trace.h linux-4.1.10/kernel/trace/trace.h
+--- linux-4.1.10.orig/kernel/trace/trace.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/trace.h 2015-10-07 18:00:08.000000000 +0200
@@ -120,6 +120,7 @@
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
@@ -23261,9 +47531,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace.h linux-4.1.6/kernel/trace/trace.h
};
#define TRACE_BUF_SIZE 1024
-diff -Nur linux-4.1.6.orig/kernel/trace/trace_irqsoff.c linux-4.1.6/kernel/trace/trace_irqsoff.c
---- linux-4.1.6.orig/kernel/trace/trace_irqsoff.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/trace_irqsoff.c 2015-09-08 23:49:08.129864725 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/trace_irqsoff.c linux-4.1.10/kernel/trace/trace_irqsoff.c
+--- linux-4.1.10.orig/kernel/trace/trace_irqsoff.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/trace_irqsoff.c 2015-10-07 18:00:08.000000000 +0200
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
@@ -23290,7 +47560,7 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace_irqsoff.c linux-4.1.6/kernel/trace
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
-+ trace_preemptirqsoff_hist(IRQS_ON, 0);
++ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
@@ -23298,7 +47568,7 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace_irqsoff.c linux-4.1.6/kernel/trace
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
-+ trace_preemptirqsoff_hist(IRQS_OFF, 1);
++ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */
@@ -23347,9 +47617,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace_irqsoff.c linux-4.1.6/kernel/trace
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
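
The trace_irqsoff.c hunks above route the IRQS_ON/IRQS_OFF and PREEMPT_ON/PREEMPT_OFF transitions into the latency histogram code this patch adds in kernel/trace/latency_hist.c. As a rough, stand-alone illustration of latency bucketing only (plain user-space C; the timing loop, bucket width and all names are invented and are not code from the patch):

/*
 * Hypothetical sketch: time a critical section and bucket the observed
 * latencies into a one-microsecond-per-bucket histogram.
 */
#include <stdio.h>
#include <time.h>

#define NBUCKETS 64        /* last bucket collects all outliers */

static unsigned long hist[NBUCKETS];

static void record_latency(const struct timespec *a, const struct timespec *b)
{
        long us = (b->tv_sec - a->tv_sec) * 1000000L +
                  (b->tv_nsec - a->tv_nsec) / 1000L;

        if (us < 0)
                us = 0;
        if (us >= NBUCKETS)
                us = NBUCKETS - 1;
        hist[us]++;
}

int main(void)
{
        struct timespec start, end;
        volatile unsigned long sink = 0;
        int i, j;

        for (i = 0; i < 100000; i++) {
                clock_gettime(CLOCK_MONOTONIC, &start);
                for (j = 0; j < 1000; j++)   /* stand-in for a critical section */
                        sink += j;
                clock_gettime(CLOCK_MONOTONIC, &end);
                record_latency(&start, &end);
        }

        for (i = 0; i < NBUCKETS; i++)
                if (hist[i])
                        printf("%3d us: %lu\n", i, hist[i]);
        return 0;
}
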
-diff -Nur linux-4.1.6.orig/kernel/trace/trace_output.c linux-4.1.6/kernel/trace/trace_output.c
---- linux-4.1.6.orig/kernel/trace/trace_output.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/trace/trace_output.c 2015-09-08 23:49:08.137863836 +0200
+diff -Nur linux-4.1.10.orig/kernel/trace/trace_output.c linux-4.1.10/kernel/trace/trace_output.c
+--- linux-4.1.10.orig/kernel/trace/trace_output.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/trace/trace_output.c 2015-10-07 18:00:08.000000000 +0200
@@ -430,6 +430,7 @@
{
char hardsoft_irq;
@@ -23395,9 +47665,9 @@ diff -Nur linux-4.1.6.orig/kernel/trace/trace_output.c linux-4.1.6/kernel/trace/
return !trace_seq_has_overflowed(s);
}
-diff -Nur linux-4.1.6.orig/kernel/user.c linux-4.1.6/kernel/user.c
---- linux-4.1.6.orig/kernel/user.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/user.c 2015-09-08 23:49:08.137863836 +0200
+diff -Nur linux-4.1.10.orig/kernel/user.c linux-4.1.10/kernel/user.c
+--- linux-4.1.10.orig/kernel/user.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/user.c 2015-10-07 18:00:08.000000000 +0200
@@ -161,11 +161,11 @@
if (!up)
return;
@@ -23412,9 +47682,9 @@ diff -Nur linux-4.1.6.orig/kernel/user.c linux-4.1.6/kernel/user.c
}
struct user_struct *alloc_uid(kuid_t uid)
-diff -Nur linux-4.1.6.orig/kernel/watchdog.c linux-4.1.6/kernel/watchdog.c
---- linux-4.1.6.orig/kernel/watchdog.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/watchdog.c 2015-09-08 23:49:08.421832347 +0200
+diff -Nur linux-4.1.10.orig/kernel/watchdog.c linux-4.1.10/kernel/watchdog.c
+--- linux-4.1.10.orig/kernel/watchdog.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/watchdog.c 2015-10-07 18:00:08.000000000 +0200
@@ -262,6 +262,8 @@
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -23456,9 +47726,9 @@ diff -Nur linux-4.1.6.orig/kernel/watchdog.c linux-4.1.6/kernel/watchdog.c
/* Enable the perf event */
watchdog_nmi_enable(cpu);
-diff -Nur linux-4.1.6.orig/kernel/workqueue.c linux-4.1.6/kernel/workqueue.c
---- linux-4.1.6.orig/kernel/workqueue.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/workqueue.c 2015-09-08 23:49:08.425831903 +0200
+diff -Nur linux-4.1.10.orig/kernel/workqueue.c linux-4.1.10/kernel/workqueue.c
+--- linux-4.1.10.orig/kernel/workqueue.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/workqueue.c 2015-10-07 18:00:08.000000000 +0200
@@ -48,6 +48,8 @@
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
@@ -24090,9 +48360,9 @@ diff -Nur linux-4.1.6.orig/kernel/workqueue.c linux-4.1.6/kernel/workqueue.c
return written;
}
-diff -Nur linux-4.1.6.orig/kernel/workqueue_internal.h linux-4.1.6/kernel/workqueue_internal.h
---- linux-4.1.6.orig/kernel/workqueue_internal.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/kernel/workqueue_internal.h 2015-09-08 23:49:08.425831903 +0200
+diff -Nur linux-4.1.10.orig/kernel/workqueue_internal.h linux-4.1.10/kernel/workqueue_internal.h
+--- linux-4.1.10.orig/kernel/workqueue_internal.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/kernel/workqueue_internal.h 2015-10-07 18:00:08.000000000 +0200
@@ -43,6 +43,7 @@
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
@@ -24111,9 +48381,9 @@ diff -Nur linux-4.1.6.orig/kernel/workqueue_internal.h linux-4.1.6/kernel/workqu
+void wq_worker_sleeping(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
-diff -Nur linux-4.1.6.orig/lib/debugobjects.c linux-4.1.6/lib/debugobjects.c
---- linux-4.1.6.orig/lib/debugobjects.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/debugobjects.c 2015-09-08 23:49:08.425831903 +0200
+diff -Nur linux-4.1.10.orig/lib/debugobjects.c linux-4.1.10/lib/debugobjects.c
+--- linux-4.1.10.orig/lib/debugobjects.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/debugobjects.c 2015-10-07 18:00:08.000000000 +0200
@@ -309,7 +309,10 @@
struct debug_obj *obj;
unsigned long flags;
@@ -24126,9 +48396,9 @@ diff -Nur linux-4.1.6.orig/lib/debugobjects.c linux-4.1.6/lib/debugobjects.c
db = get_bucket((unsigned long) addr);
-diff -Nur linux-4.1.6.orig/lib/dump_stack.c linux-4.1.6/lib/dump_stack.c
---- linux-4.1.6.orig/lib/dump_stack.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/dump_stack.c 2015-09-08 23:49:08.429831459 +0200
+diff -Nur linux-4.1.10.orig/lib/dump_stack.c linux-4.1.10/lib/dump_stack.c
+--- linux-4.1.10.orig/lib/dump_stack.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/dump_stack.c 2015-10-07 18:00:08.000000000 +0200
@@ -33,7 +33,7 @@
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
@@ -24147,9 +48417,9 @@ diff -Nur linux-4.1.6.orig/lib/dump_stack.c linux-4.1.6/lib/dump_stack.c
}
#else
asmlinkage __visible void dump_stack(void)
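
The lib/dump_stack.c context above centres on the upstream serialisation scheme its comment describes: only one CPU performs a stack dump at a time, while the CPU that already owns the dump lock is allowed to recurse into it. A conceptual user-space sketch of that ownership pattern, with thread indices standing in for CPU numbers and every name invented:

/*
 * Sketch: a single "dumper" at a time, but the current owner may re-enter.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_owner = ATOMIC_VAR_INIT(-1);

static void dump(int self)
{
        int old = -1;

        /* Try to become the single dumper. */
        while (!atomic_compare_exchange_strong(&dump_owner, &old, self)) {
                if (old == self) {
                        /* We already own it: this is a nested dump. */
                        printf("thread %d: nested dump\n", self);
                        return;
                }
                old = -1;       /* lost the race to another thread, retry */
        }

        printf("thread %d: dumping ...\n", self);

        atomic_store(&dump_owner, -1);  /* release ownership */
}

int main(void)
{
        dump(0);
        dump(1);
        return 0;
}
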
-diff -Nur linux-4.1.6.orig/lib/idr.c linux-4.1.6/lib/idr.c
---- linux-4.1.6.orig/lib/idr.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/idr.c 2015-09-08 23:49:08.429831459 +0200
+diff -Nur linux-4.1.10.orig/lib/idr.c linux-4.1.10/lib/idr.c
+--- linux-4.1.10.orig/lib/idr.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/idr.c 2015-10-07 18:00:08.000000000 +0200
@@ -30,6 +30,7 @@
#include <linux/idr.h>
#include <linux/spinlock.h>
@@ -24215,9 +48485,9 @@ diff -Nur linux-4.1.6.orig/lib/idr.c linux-4.1.6/lib/idr.c
if (!new)
break;
-diff -Nur linux-4.1.6.orig/lib/Kconfig linux-4.1.6/lib/Kconfig
---- linux-4.1.6.orig/lib/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/Kconfig 2015-09-08 23:49:08.425831903 +0200
+diff -Nur linux-4.1.10.orig/lib/Kconfig linux-4.1.10/lib/Kconfig
+--- linux-4.1.10.orig/lib/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -391,6 +391,7 @@
config CPUMASK_OFFSTACK
@@ -24226,9 +48496,9 @@ diff -Nur linux-4.1.6.orig/lib/Kconfig linux-4.1.6/lib/Kconfig
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
-diff -Nur linux-4.1.6.orig/lib/locking-selftest.c linux-4.1.6/lib/locking-selftest.c
---- linux-4.1.6.orig/lib/locking-selftest.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/locking-selftest.c 2015-09-08 23:49:08.429831459 +0200
+diff -Nur linux-4.1.10.orig/lib/locking-selftest.c linux-4.1.10/lib/locking-selftest.c
+--- linux-4.1.10.orig/lib/locking-selftest.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/locking-selftest.c 2015-10-07 18:00:08.000000000 +0200
@@ -590,6 +590,8 @@
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
@@ -24377,9 +48647,9 @@ diff -Nur linux-4.1.6.orig/lib/locking-selftest.c linux-4.1.6/lib/locking-selfte
ww_tests();
-diff -Nur linux-4.1.6.orig/lib/percpu_ida.c linux-4.1.6/lib/percpu_ida.c
---- linux-4.1.6.orig/lib/percpu_ida.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/percpu_ida.c 2015-09-08 23:49:08.433831016 +0200
+diff -Nur linux-4.1.10.orig/lib/percpu_ida.c linux-4.1.10/lib/percpu_ida.c
+--- linux-4.1.10.orig/lib/percpu_ida.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/percpu_ida.c 2015-10-07 18:00:08.000000000 +0200
@@ -26,6 +26,9 @@
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -24468,9 +48738,9 @@ diff -Nur linux-4.1.6.orig/lib/percpu_ida.c linux-4.1.6/lib/percpu_ida.c
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
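
The lib/percpu_ida.c hunks above follow the usual -rt conversion: the interrupt-disabling sections that guard the per-CPU tag caches become locked sections, which keeps them preemptible on PREEMPT_RT. A rough user-space analogue of that shape, pairing each per-CPU-style slot with its own lock so work on one slot never blocks the others (pthread mutexes here; all names are illustrative, not taken from the patch):

/*
 * Sketch: per-slot freelists, each protected by its own small lock
 * instead of a "disable interrupts everywhere" section.
 */
#include <pthread.h>
#include <stdio.h>

#define NSLOTS          4
#define SLOT_DEPTH      16

struct slot {
        pthread_mutex_t lock;
        int nr_free;
        int freelist[SLOT_DEPTH];
};

static struct slot slots[NSLOTS];

static int slot_alloc(int cpu)
{
        struct slot *s = &slots[cpu % NSLOTS];
        int tag = -1;

        pthread_mutex_lock(&s->lock);
        if (s->nr_free > 0)
                tag = s->freelist[--s->nr_free];
        pthread_mutex_unlock(&s->lock);
        return tag;
}

static void slot_free(int cpu, int tag)
{
        struct slot *s = &slots[cpu % NSLOTS];

        pthread_mutex_lock(&s->lock);
        if (s->nr_free < SLOT_DEPTH)
                s->freelist[s->nr_free++] = tag;
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        int i, tag;

        for (i = 0; i < NSLOTS; i++) {
                pthread_mutex_init(&slots[i].lock, NULL);
                slots[i].nr_free = 1;
                slots[i].freelist[0] = 100 + i;
        }

        tag = slot_alloc(0);
        printf("slot 0 handed out tag %d\n", tag);
        slot_free(0, tag);
        return 0;
}
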
-diff -Nur linux-4.1.6.orig/lib/radix-tree.c linux-4.1.6/lib/radix-tree.c
---- linux-4.1.6.orig/lib/radix-tree.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/radix-tree.c 2015-09-08 23:49:08.433831016 +0200
+diff -Nur linux-4.1.10.orig/lib/radix-tree.c linux-4.1.10/lib/radix-tree.c
+--- linux-4.1.10.orig/lib/radix-tree.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/radix-tree.c 2015-10-07 18:00:08.000000000 +0200
@@ -195,12 +195,13 @@
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
@@ -24502,9 +48772,9 @@ diff -Nur linux-4.1.6.orig/lib/radix-tree.c linux-4.1.6/lib/radix-tree.c
/*
* Return the maximum key which can be store into a
-diff -Nur linux-4.1.6.orig/lib/scatterlist.c linux-4.1.6/lib/scatterlist.c
---- linux-4.1.6.orig/lib/scatterlist.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/scatterlist.c 2015-09-08 23:49:08.433831016 +0200
+diff -Nur linux-4.1.10.orig/lib/scatterlist.c linux-4.1.10/lib/scatterlist.c
+--- linux-4.1.10.orig/lib/scatterlist.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/scatterlist.c 2015-10-07 18:00:08.000000000 +0200
@@ -592,7 +592,7 @@
flush_kernel_dcache_page(miter->page);
@@ -24532,9 +48802,9 @@ diff -Nur linux-4.1.6.orig/lib/scatterlist.c linux-4.1.6/lib/scatterlist.c
return offset;
}
-diff -Nur linux-4.1.6.orig/lib/smp_processor_id.c linux-4.1.6/lib/smp_processor_id.c
---- linux-4.1.6.orig/lib/smp_processor_id.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/smp_processor_id.c 2015-09-08 23:49:08.433831016 +0200
+diff -Nur linux-4.1.10.orig/lib/smp_processor_id.c linux-4.1.10/lib/smp_processor_id.c
+--- linux-4.1.10.orig/lib/smp_processor_id.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/smp_processor_id.c 2015-10-07 18:00:08.000000000 +0200
@@ -39,8 +39,9 @@
if (!printk_ratelimit())
goto out_enable;
@@ -24547,9 +48817,9 @@ diff -Nur linux-4.1.6.orig/lib/smp_processor_id.c linux-4.1.6/lib/smp_processor_
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
-diff -Nur linux-4.1.6.orig/lib/strnlen_user.c linux-4.1.6/lib/strnlen_user.c
---- linux-4.1.6.orig/lib/strnlen_user.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/lib/strnlen_user.c 2015-09-08 23:49:08.433831016 +0200
+diff -Nur linux-4.1.10.orig/lib/strnlen_user.c linux-4.1.10/lib/strnlen_user.c
+--- linux-4.1.10.orig/lib/strnlen_user.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/lib/strnlen_user.c 2015-10-07 18:00:08.000000000 +0200
@@ -85,7 +85,8 @@
* @str: The string to measure.
* @count: Maximum count (including NUL character)
@@ -24570,9 +48840,14 @@ diff -Nur linux-4.1.6.orig/lib/strnlen_user.c linux-4.1.6/lib/strnlen_user.c
*
* Get the size of a NUL-terminated string in user space.
*
-diff -Nur linux-4.1.6.orig/mm/compaction.c linux-4.1.6/mm/compaction.c
---- linux-4.1.6.orig/mm/compaction.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/compaction.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/localversion-rt linux-4.1.10/localversion-rt
+--- linux-4.1.10.orig/localversion-rt 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/localversion-rt 2015-10-07 18:00:08.000000000 +0200
+@@ -0,0 +1 @@
++-rt8
+diff -Nur linux-4.1.10.orig/mm/compaction.c linux-4.1.10/mm/compaction.c
+--- linux-4.1.10.orig/mm/compaction.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/compaction.c 2015-10-07 18:00:08.000000000 +0200
@@ -1406,10 +1406,12 @@
cc->migrate_pfn & ~((1UL << cc->order) - 1);
@@ -24588,9 +48863,9 @@ diff -Nur linux-4.1.6.orig/mm/compaction.c linux-4.1.6/mm/compaction.c
/* No more flushing until we migrate again */
last_migrated_pfn = 0;
}
-diff -Nur linux-4.1.6.orig/mm/filemap.c linux-4.1.6/mm/filemap.c
---- linux-4.1.6.orig/mm/filemap.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/filemap.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/filemap.c linux-4.1.10/mm/filemap.c
+--- linux-4.1.10.orig/mm/filemap.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/filemap.c 2015-10-07 18:00:08.000000000 +0200
@@ -167,7 +167,9 @@
if (!workingset_node_pages(node) &&
list_empty(&node->private_list)) {
@@ -24617,9 +48892,9 @@ diff -Nur linux-4.1.6.orig/mm/filemap.c linux-4.1.6/mm/filemap.c
}
return 0;
}
-diff -Nur linux-4.1.6.orig/mm/highmem.c linux-4.1.6/mm/highmem.c
---- linux-4.1.6.orig/mm/highmem.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/highmem.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/highmem.c linux-4.1.10/mm/highmem.c
+--- linux-4.1.10.orig/mm/highmem.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/highmem.c 2015-10-07 18:00:08.000000000 +0200
@@ -29,10 +29,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
@@ -24644,9 +48919,9 @@ diff -Nur linux-4.1.6.orig/mm/highmem.c linux-4.1.6/mm/highmem.c
unsigned int nr_free_highpages (void)
{
-diff -Nur linux-4.1.6.orig/mm/Kconfig linux-4.1.6/mm/Kconfig
---- linux-4.1.6.orig/mm/Kconfig 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/Kconfig 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/Kconfig linux-4.1.10/mm/Kconfig
+--- linux-4.1.10.orig/mm/Kconfig 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/Kconfig 2015-10-07 18:00:08.000000000 +0200
@@ -409,7 +409,7 @@
config TRANSPARENT_HUGEPAGE
@@ -24656,9 +48931,9 @@ diff -Nur linux-4.1.6.orig/mm/Kconfig linux-4.1.6/mm/Kconfig
select COMPACTION
help
Transparent Hugepages allows the kernel to use huge pages and
-diff -Nur linux-4.1.6.orig/mm/memcontrol.c linux-4.1.6/mm/memcontrol.c
---- linux-4.1.6.orig/mm/memcontrol.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/memcontrol.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/memcontrol.c linux-4.1.10/mm/memcontrol.c
+--- linux-4.1.10.orig/mm/memcontrol.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/memcontrol.c 2015-10-07 18:00:08.000000000 +0200
@@ -66,6 +66,8 @@
#include <net/sock.h>
#include <net/ip.h>
@@ -24779,9 +49054,9 @@ diff -Nur linux-4.1.6.orig/mm/memcontrol.c linux-4.1.6/mm/memcontrol.c
}
/**
-diff -Nur linux-4.1.6.orig/mm/memory.c linux-4.1.6/mm/memory.c
---- linux-4.1.6.orig/mm/memory.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/memory.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/memory.c linux-4.1.10/mm/memory.c
+--- linux-4.1.10.orig/mm/memory.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/memory.c 2015-10-07 18:00:08.000000000 +0200
@@ -3743,7 +3743,7 @@
}
@@ -24818,9 +49093,9 @@ diff -Nur linux-4.1.6.orig/mm/memory.c linux-4.1.6/mm/memory.c
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-diff -Nur linux-4.1.6.orig/mm/mmu_context.c linux-4.1.6/mm/mmu_context.c
---- linux-4.1.6.orig/mm/mmu_context.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/mmu_context.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/mmu_context.c linux-4.1.10/mm/mmu_context.c
+--- linux-4.1.10.orig/mm/mmu_context.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/mmu_context.c 2015-10-07 18:00:08.000000000 +0200
@@ -23,6 +23,7 @@
struct task_struct *tsk = current;
@@ -24837,9 +49112,9 @@ diff -Nur linux-4.1.6.orig/mm/mmu_context.c linux-4.1.6/mm/mmu_context.c
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
-diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
---- linux-4.1.6.orig/mm/page_alloc.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/page_alloc.c 2015-09-08 23:49:08.437830574 +0200
+diff -Nur linux-4.1.10.orig/mm/page_alloc.c linux-4.1.10/mm/page_alloc.c
+--- linux-4.1.10.orig/mm/page_alloc.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/page_alloc.c 2015-10-07 18:00:08.000000000 +0200
@@ -60,6 +60,7 @@
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
@@ -24999,7 +49274,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
void __init __free_pages_bootmem(struct page *page, unsigned int order)
-@@ -1368,16 +1406,18 @@
+@@ -1371,16 +1409,18 @@
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -25021,7 +49296,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
#endif
-@@ -1393,16 +1433,21 @@
+@@ -1396,16 +1436,21 @@
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -25047,7 +49322,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
/*
-@@ -1488,8 +1533,17 @@
+@@ -1491,8 +1536,17 @@
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -25065,7 +49340,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
#ifdef CONFIG_HIBERNATION
-@@ -1545,7 +1599,7 @@
+@@ -1548,7 +1602,7 @@
migratetype = get_pfnblock_migratetype(page, pfn);
set_freepage_migratetype(page, migratetype);
@@ -25074,7 +49349,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
__count_vm_event(PGFREE);
/*
-@@ -1571,12 +1625,17 @@
+@@ -1574,12 +1628,17 @@
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -25094,7 +49369,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
/*
-@@ -1707,7 +1766,7 @@
+@@ -1710,7 +1769,7 @@
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -25103,7 +49378,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
-@@ -1739,13 +1798,15 @@
+@@ -1742,13 +1801,15 @@
*/
WARN_ON_ONCE(order > 1);
}
@@ -25122,7 +49397,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -1755,13 +1816,13 @@
+@@ -1758,13 +1819,13 @@
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
@@ -25138,7 +49413,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
return NULL;
}
-@@ -5650,6 +5711,7 @@
+@@ -5653,6 +5714,7 @@
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -25146,7 +49421,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
/*
-@@ -6544,7 +6606,7 @@
+@@ -6547,7 +6609,7 @@
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -25155,7 +49430,7 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -6553,7 +6615,7 @@
+@@ -6556,7 +6618,7 @@
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
@@ -25164,9 +49439,6649 @@ diff -Nur linux-4.1.6.orig/mm/page_alloc.c linux-4.1.6/mm/page_alloc.c
}
#ifdef CONFIG_MEMORY_HOTREMOVE
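
Across the mm/page_alloc.c hunks above the recurring change is the same: the sections that used to disable interrupts around the per-CPU page lists become locked sections, and the drain paths gather pages onto a private list so the heavier free work can run after the lock is dropped. A small user-space sketch of that collect-under-the-lock, process-after-unlock shape (all names invented; this is not code from the patch):

/*
 * Sketch: detach a shared cache onto a private list inside a short
 * critical section, then do the expensive per-item work unlocked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int val;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *cache;

static void cache_add(int val)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return;
        n->val = val;
        pthread_mutex_lock(&cache_lock);
        n->next = cache;
        cache = n;
        pthread_mutex_unlock(&cache_lock);
}

static void cache_drain(void)
{
        struct node *local, *n;

        /* Short critical section: just steal the whole list. */
        pthread_mutex_lock(&cache_lock);
        local = cache;
        cache = NULL;
        pthread_mutex_unlock(&cache_lock);

        /* The expensive part runs with the lock already released. */
        while ((n = local) != NULL) {
                local = n->next;
                printf("draining %d\n", n->val);
                free(n);
        }
}

int main(void)
{
        cache_add(1);
        cache_add(2);
        cache_add(3);
        cache_drain();
        return 0;
}
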
-diff -Nur linux-4.1.6.orig/mm/slab.h linux-4.1.6/mm/slab.h
---- linux-4.1.6.orig/mm/slab.h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/slab.h 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/page_alloc.c.orig linux-4.1.10/mm/page_alloc.c.orig
+--- linux-4.1.10.orig/mm/page_alloc.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/mm/page_alloc.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,6636 @@
++/*
++ * linux/mm/page_alloc.c
++ *
++ * Manages the free list, the system allocates free pages here.
++ * Note that kmalloc() lives in slab.c
++ *
++ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
++ * Swap reorganised 29.12.95, Stephen Tweedie
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
++ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
++ * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
++ * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
++ * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
++ */
++
++#include <linux/stddef.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/interrupt.h>
++#include <linux/pagemap.h>
++#include <linux/jiffies.h>
++#include <linux/bootmem.h>
++#include <linux/memblock.h>
++#include <linux/compiler.h>
++#include <linux/kernel.h>
++#include <linux/kmemcheck.h>
++#include <linux/kasan.h>
++#include <linux/module.h>
++#include <linux/suspend.h>
++#include <linux/pagevec.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++#include <linux/ratelimit.h>
++#include <linux/oom.h>
++#include <linux/notifier.h>
++#include <linux/topology.h>
++#include <linux/sysctl.h>
++#include <linux/cpu.h>
++#include <linux/cpuset.h>
++#include <linux/memory_hotplug.h>
++#include <linux/nodemask.h>
++#include <linux/vmalloc.h>
++#include <linux/vmstat.h>
++#include <linux/mempolicy.h>
++#include <linux/stop_machine.h>
++#include <linux/sort.h>
++#include <linux/pfn.h>
++#include <linux/backing-dev.h>
++#include <linux/fault-inject.h>
++#include <linux/page-isolation.h>
++#include <linux/page_ext.h>
++#include <linux/debugobjects.h>
++#include <linux/kmemleak.h>
++#include <linux/compaction.h>
++#include <trace/events/kmem.h>
++#include <linux/prefetch.h>
++#include <linux/mm_inline.h>
++#include <linux/migrate.h>
++#include <linux/page_ext.h>
++#include <linux/hugetlb.h>
++#include <linux/sched/rt.h>
++#include <linux/page_owner.h>
++
++#include <asm/sections.h>
++#include <asm/tlbflush.h>
++#include <asm/div64.h>
++#include "internal.h"
++
++/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
++static DEFINE_MUTEX(pcp_batch_high_lock);
++#define MIN_PERCPU_PAGELIST_FRACTION (8)
++
++#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
++DEFINE_PER_CPU(int, numa_node);
++EXPORT_PER_CPU_SYMBOL(numa_node);
++#endif
++
++#ifdef CONFIG_HAVE_MEMORYLESS_NODES
++/*
++ * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
++ * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
++ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
++ * defined in <linux/topology.h>.
++ */
++DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
++EXPORT_PER_CPU_SYMBOL(_numa_mem_);
++int _node_numa_mem_[MAX_NUMNODES];
++#endif
++
++/*
++ * Array of node states.
++ */
++nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
++ [N_POSSIBLE] = NODE_MASK_ALL,
++ [N_ONLINE] = { { [0] = 1UL } },
++#ifndef CONFIG_NUMA
++ [N_NORMAL_MEMORY] = { { [0] = 1UL } },
++#ifdef CONFIG_HIGHMEM
++ [N_HIGH_MEMORY] = { { [0] = 1UL } },
++#endif
++#ifdef CONFIG_MOVABLE_NODE
++ [N_MEMORY] = { { [0] = 1UL } },
++#endif
++ [N_CPU] = { { [0] = 1UL } },
++#endif /* NUMA */
++};
++EXPORT_SYMBOL(node_states);
++
++/* Protect totalram_pages and zone->managed_pages */
++static DEFINE_SPINLOCK(managed_page_count_lock);
++
++unsigned long totalram_pages __read_mostly;
++unsigned long totalreserve_pages __read_mostly;
++unsigned long totalcma_pages __read_mostly;
++/*
++ * When calculating the number of globally allowed dirty pages, there
++ * is a certain number of per-zone reserves that should not be
++ * considered dirtyable memory. This is the sum of those reserves
++ * over all existing zones that contribute dirtyable memory.
++ */
++unsigned long dirty_balance_reserve __read_mostly;
++
++int percpu_pagelist_fraction;
++gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
++
++#ifdef CONFIG_PM_SLEEP
++/*
++ * The following functions are used by the suspend/hibernate code to temporarily
++ * change gfp_allowed_mask in order to avoid using I/O during memory allocations
++ * while devices are suspended. To avoid races with the suspend/hibernate code,
++ * they should always be called with pm_mutex held (gfp_allowed_mask also should
++ * only be modified with pm_mutex held, unless the suspend/hibernate code is
++ * guaranteed not to run in parallel with that modification).
++ */
++
++static gfp_t saved_gfp_mask;
++
++void pm_restore_gfp_mask(void)
++{
++ WARN_ON(!mutex_is_locked(&pm_mutex));
++ if (saved_gfp_mask) {
++ gfp_allowed_mask = saved_gfp_mask;
++ saved_gfp_mask = 0;
++ }
++}
++
++void pm_restrict_gfp_mask(void)
++{
++ WARN_ON(!mutex_is_locked(&pm_mutex));
++ WARN_ON(saved_gfp_mask);
++ saved_gfp_mask = gfp_allowed_mask;
++ gfp_allowed_mask &= ~GFP_IOFS;
++}
++
++bool pm_suspended_storage(void)
++{
++ if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
++ return false;
++ return true;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
++int pageblock_order __read_mostly;
++#endif
++
++static void __free_pages_ok(struct page *page, unsigned int order);
++
++/*
++ * results with 256, 32 in the lowmem_reserve sysctl:
++ * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
++ * 1G machine -> (16M dma, 784M normal, 224M high)
++ * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
++ * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
++ * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
++ *
++ * TBD: should special case ZONE_DMA32 machines here - in those we normally
++ * don't need any ZONE_NORMAL reservation
++ */
++int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
++#ifdef CONFIG_ZONE_DMA
++ 256,
++#endif
++#ifdef CONFIG_ZONE_DMA32
++ 256,
++#endif
++#ifdef CONFIG_HIGHMEM
++ 32,
++#endif
++ 32,
++};
++
++EXPORT_SYMBOL(totalram_pages);
++
++static char * const zone_names[MAX_NR_ZONES] = {
++#ifdef CONFIG_ZONE_DMA
++ "DMA",
++#endif
++#ifdef CONFIG_ZONE_DMA32
++ "DMA32",
++#endif
++ "Normal",
++#ifdef CONFIG_HIGHMEM
++ "HighMem",
++#endif
++ "Movable",
++};
++
++int min_free_kbytes = 1024;
++int user_min_free_kbytes = -1;
++
++static unsigned long __meminitdata nr_kernel_pages;
++static unsigned long __meminitdata nr_all_pages;
++static unsigned long __meminitdata dma_reserve;
++
++#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
++static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
++static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
++static unsigned long __initdata required_kernelcore;
++static unsigned long __initdata required_movablecore;
++static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
++
++/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
++int movable_zone;
++EXPORT_SYMBOL(movable_zone);
++#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
++
++#if MAX_NUMNODES > 1
++int nr_node_ids __read_mostly = MAX_NUMNODES;
++int nr_online_nodes __read_mostly = 1;
++EXPORT_SYMBOL(nr_node_ids);
++EXPORT_SYMBOL(nr_online_nodes);
++#endif
++
++int page_group_by_mobility_disabled __read_mostly;
++
++void set_pageblock_migratetype(struct page *page, int migratetype)
++{
++ if (unlikely(page_group_by_mobility_disabled &&
++ migratetype < MIGRATE_PCPTYPES))
++ migratetype = MIGRATE_UNMOVABLE;
++
++ set_pageblock_flags_group(page, (unsigned long)migratetype,
++ PB_migrate, PB_migrate_end);
++}
++
++#ifdef CONFIG_DEBUG_VM
++static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
++{
++ int ret = 0;
++ unsigned seq;
++ unsigned long pfn = page_to_pfn(page);
++ unsigned long sp, start_pfn;
++
++ do {
++ seq = zone_span_seqbegin(zone);
++ start_pfn = zone->zone_start_pfn;
++ sp = zone->spanned_pages;
++ if (!zone_spans_pfn(zone, pfn))
++ ret = 1;
++ } while (zone_span_seqretry(zone, seq));
++
++ if (ret)
++ pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
++ pfn, zone_to_nid(zone), zone->name,
++ start_pfn, start_pfn + sp);
++
++ return ret;
++}
++
++static int page_is_consistent(struct zone *zone, struct page *page)
++{
++ if (!pfn_valid_within(page_to_pfn(page)))
++ return 0;
++ if (zone != page_zone(page))
++ return 0;
++
++ return 1;
++}
++/*
++ * Temporary debugging check for pages not lying within a given zone.
++ */
++static int bad_range(struct zone *zone, struct page *page)
++{
++ if (page_outside_zone_boundaries(zone, page))
++ return 1;
++ if (!page_is_consistent(zone, page))
++ return 1;
++
++ return 0;
++}
++#else
++static inline int bad_range(struct zone *zone, struct page *page)
++{
++ return 0;
++}
++#endif
++
++static void bad_page(struct page *page, const char *reason,
++ unsigned long bad_flags)
++{
++ static unsigned long resume;
++ static unsigned long nr_shown;
++ static unsigned long nr_unshown;
++
++ /* Don't complain about poisoned pages */
++ if (PageHWPoison(page)) {
++ page_mapcount_reset(page); /* remove PageBuddy */
++ return;
++ }
++
++ /*
++ * Allow a burst of 60 reports, then keep quiet for that minute;
++ * or allow a steady drip of one report per second.
++ */
++ if (nr_shown == 60) {
++ if (time_before(jiffies, resume)) {
++ nr_unshown++;
++ goto out;
++ }
++ if (nr_unshown) {
++ printk(KERN_ALERT
++ "BUG: Bad page state: %lu messages suppressed\n",
++ nr_unshown);
++ nr_unshown = 0;
++ }
++ nr_shown = 0;
++ }
++ if (nr_shown++ == 0)
++ resume = jiffies + 60 * HZ;
++
++ printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
++ current->comm, page_to_pfn(page));
++ dump_page_badflags(page, reason, bad_flags);
++
++ print_modules();
++ dump_stack();
++out:
++ /* Leave bad fields for debug, except PageBuddy could make trouble */
++ page_mapcount_reset(page); /* remove PageBuddy */
++ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++}
++
++/*
++ * Higher-order pages are called "compound pages". They are structured thusly:
++ *
++ * The first PAGE_SIZE page is called the "head page".
++ *
++ * The remaining PAGE_SIZE pages are called "tail pages".
++ *
++ * All pages have PG_compound set. All tail pages have their ->first_page
++ * pointing at the head page.
++ *
++ * The first tail page's ->lru.next holds the address of the compound page's
++ * put_page() function. Its ->lru.prev holds the order of allocation.
++ * This usage means that zero-order pages may not be compound.
++ */
++
++static void free_compound_page(struct page *page)
++{
++ __free_pages_ok(page, compound_order(page));
++}
++
++void prep_compound_page(struct page *page, unsigned long order)
++{
++ int i;
++ int nr_pages = 1 << order;
++
++ set_compound_page_dtor(page, free_compound_page);
++ set_compound_order(page, order);
++ __SetPageHead(page);
++ for (i = 1; i < nr_pages; i++) {
++ struct page *p = page + i;
++ set_page_count(p, 0);
++ p->first_page = page;
++ /* Make sure p->first_page is always valid for PageTail() */
++ smp_wmb();
++ __SetPageTail(p);
++ }
++}
++
++static inline void prep_zero_page(struct page *page, unsigned int order,
++ gfp_t gfp_flags)
++{
++ int i;
++
++ /*
++ * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
++ * and __GFP_HIGHMEM from hard or soft interrupt context.
++ */
++ VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
++ for (i = 0; i < (1 << order); i++)
++ clear_highpage(page + i);
++}
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++unsigned int _debug_guardpage_minorder;
++bool _debug_pagealloc_enabled __read_mostly;
++bool _debug_guardpage_enabled __read_mostly;
++
++static int __init early_debug_pagealloc(char *buf)
++{
++ if (!buf)
++ return -EINVAL;
++
++ if (strcmp(buf, "on") == 0)
++ _debug_pagealloc_enabled = true;
++
++ return 0;
++}
++early_param("debug_pagealloc", early_debug_pagealloc);
++
++static bool need_debug_guardpage(void)
++{
++ /* If we don't use debug_pagealloc, we don't need guard page */
++ if (!debug_pagealloc_enabled())
++ return false;
++
++ return true;
++}
++
++static void init_debug_guardpage(void)
++{
++ if (!debug_pagealloc_enabled())
++ return;
++
++ _debug_guardpage_enabled = true;
++}
++
++struct page_ext_operations debug_guardpage_ops = {
++ .need = need_debug_guardpage,
++ .init = init_debug_guardpage,
++};
++
++static int __init debug_guardpage_minorder_setup(char *buf)
++{
++ unsigned long res;
++
++ if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
++ printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
++ return 0;
++ }
++ _debug_guardpage_minorder = res;
++ printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
++ return 0;
++}
++__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
++
++static inline void set_page_guard(struct zone *zone, struct page *page,
++ unsigned int order, int migratetype)
++{
++ struct page_ext *page_ext;
++
++ if (!debug_guardpage_enabled())
++ return;
++
++ page_ext = lookup_page_ext(page);
++ __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
++
++ INIT_LIST_HEAD(&page->lru);
++ set_page_private(page, order);
++ /* Guard pages are not available for any usage */
++ __mod_zone_freepage_state(zone, -(1 << order), migratetype);
++}
++
++static inline void clear_page_guard(struct zone *zone, struct page *page,
++ unsigned int order, int migratetype)
++{
++ struct page_ext *page_ext;
++
++ if (!debug_guardpage_enabled())
++ return;
++
++ page_ext = lookup_page_ext(page);
++ __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
++
++ set_page_private(page, 0);
++ if (!is_migrate_isolate(migratetype))
++ __mod_zone_freepage_state(zone, (1 << order), migratetype);
++}
++#else
++struct page_ext_operations debug_guardpage_ops = { NULL, };
++static inline void set_page_guard(struct zone *zone, struct page *page,
++ unsigned int order, int migratetype) {}
++static inline void clear_page_guard(struct zone *zone, struct page *page,
++ unsigned int order, int migratetype) {}
++#endif
++
++static inline void set_page_order(struct page *page, unsigned int order)
++{
++ set_page_private(page, order);
++ __SetPageBuddy(page);
++}
++
++static inline void rmv_page_order(struct page *page)
++{
++ __ClearPageBuddy(page);
++ set_page_private(page, 0);
++}
++
++/*
++ * This function checks whether a page is free && is the buddy
++ * we can coalesce a page and its buddy if
++ * (a) the buddy is not in a hole &&
++ * (b) the buddy is in the buddy system &&
++ * (c) a page and its buddy have the same order &&
++ * (d) a page and its buddy are in the same zone.
++ *
++ * For recording whether a page is in the buddy system, we set ->_mapcount
++ * PAGE_BUDDY_MAPCOUNT_VALUE.
++ * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
++ * serialized by zone->lock.
++ *
++ * For recording page's order, we use page_private(page).
++ */
++static inline int page_is_buddy(struct page *page, struct page *buddy,
++ unsigned int order)
++{
++ if (!pfn_valid_within(page_to_pfn(buddy)))
++ return 0;
++
++ if (page_is_guard(buddy) && page_order(buddy) == order) {
++ if (page_zone_id(page) != page_zone_id(buddy))
++ return 0;
++
++ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
++
++ return 1;
++ }
++
++ if (PageBuddy(buddy) && page_order(buddy) == order) {
++ /*
++ * zone check is done late to avoid uselessly
++ * calculating zone/node ids for pages that could
++ * never merge.
++ */
++ if (page_zone_id(page) != page_zone_id(buddy))
++ return 0;
++
++ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
++
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * Freeing function for a buddy system allocator.
++ *
++ * The concept of a buddy system is to maintain direct-mapped table
++ * (containing bit values) for memory blocks of various "orders".
++ * The bottom level table contains the map for the smallest allocatable
++ * units of memory (here, pages), and each level above it describes
++ * pairs of units from the levels below, hence, "buddies".
++ * At a high level, all that happens here is marking the table entry
++ * at the bottom level available, and propagating the changes upward
++ * as necessary, plus some accounting needed to play nicely with other
++ * parts of the VM system.
++ * At each level, we keep a list of pages, which are heads of continuous
++ * free pages of length of (1 << order) and marked with _mapcount
++ * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
++ * field.
++ * So when we are allocating or freeing one, we can derive the state of the
++ * other. That is, if we allocate a small block, and both were
++ * free, the remainder of the region must be split into blocks.
++ * If a block is freed, and its buddy is also free, then this
++ * triggers coalescing into a block of larger size.
++ *
++ * -- nyc
++ */
++
++static inline void __free_one_page(struct page *page,
++ unsigned long pfn,
++ struct zone *zone, unsigned int order,
++ int migratetype)
++{
++ unsigned long page_idx;
++ unsigned long combined_idx;
++ unsigned long uninitialized_var(buddy_idx);
++ struct page *buddy;
++ int max_order = MAX_ORDER;
++
++ VM_BUG_ON(!zone_is_initialized(zone));
++ VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
++
++ VM_BUG_ON(migratetype == -1);
++ if (is_migrate_isolate(migratetype)) {
++ /*
++ * We restrict max order of merging to prevent merge
++ * between freepages on isolate pageblock and normal
++ * pageblock. Without this, pageblock isolation
++ * could cause incorrect freepage accounting.
++ */
++ max_order = min(MAX_ORDER, pageblock_order + 1);
++ } else {
++ __mod_zone_freepage_state(zone, 1 << order, migratetype);
++ }
++
++ page_idx = pfn & ((1 << max_order) - 1);
++
++ VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
++ VM_BUG_ON_PAGE(bad_range(zone, page), page);
++
++ while (order < max_order - 1) {
++ buddy_idx = __find_buddy_index(page_idx, order);
++ buddy = page + (buddy_idx - page_idx);
++ if (!page_is_buddy(page, buddy, order))
++ break;
++ /*
++ * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
++ * merge with it and move up one order.
++ */
++ if (page_is_guard(buddy)) {
++ clear_page_guard(zone, buddy, order, migratetype);
++ } else {
++ list_del(&buddy->lru);
++ zone->free_area[order].nr_free--;
++ rmv_page_order(buddy);
++ }
++ combined_idx = buddy_idx & page_idx;
++ page = page + (combined_idx - page_idx);
++ page_idx = combined_idx;
++ order++;
++ }
++ set_page_order(page, order);
++
++ /*
++ * If this is not the largest possible page, check if the buddy
++ * of the next-highest order is free. If it is, it's possible
++ * that pages are being freed that will coalesce soon. In case,
++ * that is happening, add the free page to the tail of the list
++ * so it's less likely to be used soon and more likely to be merged
++ * as a higher order page
++ */
++ if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
++ struct page *higher_page, *higher_buddy;
++ combined_idx = buddy_idx & page_idx;
++ higher_page = page + (combined_idx - page_idx);
++ buddy_idx = __find_buddy_index(combined_idx, order + 1);
++ higher_buddy = higher_page + (buddy_idx - combined_idx);
++ if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
++ list_add_tail(&page->lru,
++ &zone->free_area[order].free_list[migratetype]);
++ goto out;
++ }
++ }
++
++ list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
++out:
++ zone->free_area[order].nr_free++;
++}
++
++static inline int free_pages_check(struct page *page)
++{
++ const char *bad_reason = NULL;
++ unsigned long bad_flags = 0;
++
++ if (unlikely(page_mapcount(page)))
++ bad_reason = "nonzero mapcount";
++ if (unlikely(page->mapping != NULL))
++ bad_reason = "non-NULL mapping";
++ if (unlikely(atomic_read(&page->_count) != 0))
++ bad_reason = "nonzero _count";
++ if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
++ bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
++ bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
++ }
++#ifdef CONFIG_MEMCG
++ if (unlikely(page->mem_cgroup))
++ bad_reason = "page still charged to cgroup";
++#endif
++ if (unlikely(bad_reason)) {
++ bad_page(page, bad_reason, bad_flags);
++ return 1;
++ }
++ page_cpupid_reset_last(page);
++ if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
++ page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
++ return 0;
++}
++
++/*
++ * Frees a number of pages from the PCP lists
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ *
++ * If the zone was previously in an "all pages pinned" state then look to
++ * see if this freeing clears that state.
++ *
++ * And clear the zone's pages_scanned counter, to hold off the "all pages are
++ * pinned" detection logic.
++ */
++static void free_pcppages_bulk(struct zone *zone, int count,
++ struct per_cpu_pages *pcp)
++{
++ int migratetype = 0;
++ int batch_free = 0;
++ int to_free = count;
++ unsigned long nr_scanned;
++
++ spin_lock(&zone->lock);
++ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
++ if (nr_scanned)
++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
++
++ while (to_free) {
++ struct page *page;
++ struct list_head *list;
++
++ /*
++ * Remove pages from lists in a round-robin fashion. A
++ * batch_free count is maintained that is incremented when an
++ * empty list is encountered. This is so more pages are freed
++ * off fuller lists instead of spinning excessively around empty
++ * lists
++ */
++ do {
++ batch_free++;
++ if (++migratetype == MIGRATE_PCPTYPES)
++ migratetype = 0;
++ list = &pcp->lists[migratetype];
++ } while (list_empty(list));
++
++ /* This is the only non-empty list. Free them all. */
++ if (batch_free == MIGRATE_PCPTYPES)
++ batch_free = to_free;
++
++ do {
++ int mt; /* migratetype of the to-be-freed page */
++
++ page = list_entry(list->prev, struct page, lru);
++ /* must delete as __free_one_page list manipulates */
++ list_del(&page->lru);
++ mt = get_freepage_migratetype(page);
++ if (unlikely(has_isolate_pageblock(zone)))
++ mt = get_pageblock_migratetype(page);
++
++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
++ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
++ trace_mm_page_pcpu_drain(page, 0, mt);
++ } while (--to_free && --batch_free && !list_empty(list));
++ }
++ spin_unlock(&zone->lock);
++}
++
++static void free_one_page(struct zone *zone,
++ struct page *page, unsigned long pfn,
++ unsigned int order,
++ int migratetype)
++{
++ unsigned long nr_scanned;
++ spin_lock(&zone->lock);
++ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
++ if (nr_scanned)
++ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
++
++ if (unlikely(has_isolate_pageblock(zone) ||
++ is_migrate_isolate(migratetype))) {
++ migratetype = get_pfnblock_migratetype(page, pfn);
++ }
++ __free_one_page(page, pfn, zone, order, migratetype);
++ spin_unlock(&zone->lock);
++}
++
++static int free_tail_pages_check(struct page *head_page, struct page *page)
++{
++ if (!IS_ENABLED(CONFIG_DEBUG_VM))
++ return 0;
++ if (unlikely(!PageTail(page))) {
++ bad_page(page, "PageTail not set", 0);
++ return 1;
++ }
++ if (unlikely(page->first_page != head_page)) {
++ bad_page(page, "first_page not consistent", 0);
++ return 1;
++ }
++ return 0;
++}
++
++static bool free_pages_prepare(struct page *page, unsigned int order)
++{
++ bool compound = PageCompound(page);
++ int i, bad = 0;
++
++ VM_BUG_ON_PAGE(PageTail(page), page);
++ VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
++
++ trace_mm_page_free(page, order);
++ kmemcheck_free_shadow(page, order);
++ kasan_free_pages(page, order);
++
++ if (PageAnon(page))
++ page->mapping = NULL;
++ bad += free_pages_check(page);
++ for (i = 1; i < (1 << order); i++) {
++ if (compound)
++ bad += free_tail_pages_check(page, page + i);
++ bad += free_pages_check(page + i);
++ }
++ if (bad)
++ return false;
++
++ reset_page_owner(page, order);
++
++ if (!PageHighMem(page)) {
++ debug_check_no_locks_freed(page_address(page),
++ PAGE_SIZE << order);
++ debug_check_no_obj_freed(page_address(page),
++ PAGE_SIZE << order);
++ }
++ arch_free_page(page, order);
++ kernel_map_pages(page, 1 << order, 0);
++
++ return true;
++}
++
++static void __free_pages_ok(struct page *page, unsigned int order)
++{
++ unsigned long flags;
++ int migratetype;
++ unsigned long pfn = page_to_pfn(page);
++
++ if (!free_pages_prepare(page, order))
++ return;
++
++ migratetype = get_pfnblock_migratetype(page, pfn);
++ local_irq_save(flags);
++ __count_vm_events(PGFREE, 1 << order);
++ set_freepage_migratetype(page, migratetype);
++ free_one_page(page_zone(page), page, pfn, order, migratetype);
++ local_irq_restore(flags);
++}
++
++void __init __free_pages_bootmem(struct page *page, unsigned int order)
++{
++ unsigned int nr_pages = 1 << order;
++ struct page *p = page;
++ unsigned int loop;
++
++ prefetchw(p);
++ for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
++ prefetchw(p + 1);
++ __ClearPageReserved(p);
++ set_page_count(p, 0);
++ }
++ __ClearPageReserved(p);
++ set_page_count(p, 0);
++
++ page_zone(page)->managed_pages += nr_pages;
++ set_page_refcounted(page);
++ __free_pages(page, order);
++}
++
++#ifdef CONFIG_CMA
++/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
++void __init init_cma_reserved_pageblock(struct page *page)
++{
++ unsigned i = pageblock_nr_pages;
++ struct page *p = page;
++
++ do {
++ __ClearPageReserved(p);
++ set_page_count(p, 0);
++ } while (++p, --i);
++
++ set_pageblock_migratetype(page, MIGRATE_CMA);
++
++ if (pageblock_order >= MAX_ORDER) {
++ i = pageblock_nr_pages;
++ p = page;
++ do {
++ set_page_refcounted(p);
++ __free_pages(p, MAX_ORDER - 1);
++ p += MAX_ORDER_NR_PAGES;
++ } while (i -= MAX_ORDER_NR_PAGES);
++ } else {
++ set_page_refcounted(page);
++ __free_pages(page, pageblock_order);
++ }
++
++ adjust_managed_page_count(page, pageblock_nr_pages);
++}
++#endif
++
++/*
++ * The order of subdivision here is critical for the IO subsystem.
++ * Please do not alter this order without good reasons and regression
++ * testing. Specifically, as large blocks of memory are subdivided,
++ * the order in which smaller blocks are delivered depends on the order
++ * they're subdivided in this function. This is the primary factor
++ * influencing the order in which pages are delivered to the IO
++ * subsystem according to empirical testing, and this is also justified
++ * by considering the behavior of a buddy system containing a single
++ * large block of memory acted on by a series of small allocations.
++ * This behavior is a critical factor in sglist merging's success.
++ *
++ * -- nyc
++ */
++static inline void expand(struct zone *zone, struct page *page,
++ int low, int high, struct free_area *area,
++ int migratetype)
++{
++ unsigned long size = 1 << high;
++
++ while (high > low) {
++ area--;
++ high--;
++ size >>= 1;
++ VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
++
++ if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
++ debug_guardpage_enabled() &&
++ high < debug_guardpage_minorder()) {
++ /*
++ * Mark as guard pages (or page), that will allow to
++ * merge back to allocator when buddy will be freed.
++ * Corresponding page table entries will not be touched,
++ * pages will stay not present in virtual address space
++ */
++ set_page_guard(zone, &page[size], high, migratetype);
++ continue;
++ }
++ list_add(&page[size].lru, &area->free_list[migratetype]);
++ area->nr_free++;
++ set_page_order(&page[size], high);
++ }
++}
++
++/*
++ * This page is about to be returned from the page allocator
++ */
++static inline int check_new_page(struct page *page)
++{
++ const char *bad_reason = NULL;
++ unsigned long bad_flags = 0;
++
++ if (unlikely(page_mapcount(page)))
++ bad_reason = "nonzero mapcount";
++ if (unlikely(page->mapping != NULL))
++ bad_reason = "non-NULL mapping";
++ if (unlikely(atomic_read(&page->_count) != 0))
++ bad_reason = "nonzero _count";
++ if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
++ bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
++ bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
++ }
++#ifdef CONFIG_MEMCG
++ if (unlikely(page->mem_cgroup))
++ bad_reason = "page still charged to cgroup";
++#endif
++ if (unlikely(bad_reason)) {
++ bad_page(page, bad_reason, bad_flags);
++ return 1;
++ }
++ return 0;
++}
++
++static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
++ int alloc_flags)
++{
++ int i;
++
++ for (i = 0; i < (1 << order); i++) {
++ struct page *p = page + i;
++ if (unlikely(check_new_page(p)))
++ return 1;
++ }
++
++ set_page_private(page, 0);
++ set_page_refcounted(page);
++
++ arch_alloc_page(page, order);
++ kernel_map_pages(page, 1 << order, 1);
++ kasan_alloc_pages(page, order);
++
++ if (gfp_flags & __GFP_ZERO)
++ prep_zero_page(page, order, gfp_flags);
++
++ if (order && (gfp_flags & __GFP_COMP))
++ prep_compound_page(page, order);
++
++ set_page_owner(page, order, gfp_flags);
++
++ /*
++ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
++ * allocate the page. The expectation is that the caller is taking
++ * steps that will free more memory. The caller should avoid the page
++ * being used for !PFMEMALLOC purposes.
++ */
++ if (alloc_flags & ALLOC_NO_WATERMARKS)
++ set_page_pfmemalloc(page);
++ else
++ clear_page_pfmemalloc(page);
++
++ return 0;
++}
++
++/*
++ * Go through the free lists for the given migratetype and remove
++ * the smallest available page from the freelists
++ */
++static inline
++struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
++ int migratetype)
++{
++ unsigned int current_order;
++ struct free_area *area;
++ struct page *page;
++
++ /* Find a page of the appropriate size in the preferred list */
++ for (current_order = order; current_order < MAX_ORDER; ++current_order) {
++ area = &(zone->free_area[current_order]);
++ if (list_empty(&area->free_list[migratetype]))
++ continue;
++
++ page = list_entry(area->free_list[migratetype].next,
++ struct page, lru);
++ list_del(&page->lru);
++ rmv_page_order(page);
++ area->nr_free--;
++ expand(zone, page, order, current_order, area, migratetype);
++ set_freepage_migratetype(page, migratetype);
++ return page;
++ }
++
++ return NULL;
++}
++
++
++/*
++ * This array describes the order lists are fallen back to when
++ * the free lists for the desirable migrate type are depleted
++ */
++static int fallbacks[MIGRATE_TYPES][4] = {
++ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
++ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
++ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
++#ifdef CONFIG_CMA
++ [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
++#endif
++ [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
++#ifdef CONFIG_MEMORY_ISOLATION
++ [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
++#endif
++};
++
++#ifdef CONFIG_CMA
++static struct page *__rmqueue_cma_fallback(struct zone *zone,
++ unsigned int order)
++{
++ return __rmqueue_smallest(zone, order, MIGRATE_CMA);
++}
++#else
++static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
++ unsigned int order) { return NULL; }
++#endif
++
++/*
++ * Move the free pages in a range to the free lists of the requested type.
++ * Note that start_page and end_pages are not aligned on a pageblock
++ * boundary. If alignment is required, use move_freepages_block()
++ */
++int move_freepages(struct zone *zone,
++ struct page *start_page, struct page *end_page,
++ int migratetype)
++{
++ struct page *page;
++ unsigned long order;
++ int pages_moved = 0;
++
++#ifndef CONFIG_HOLES_IN_ZONE
++ /*
++ * page_zone is not safe to call in this context when
++ * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
++ * anyway as we check zone boundaries in move_freepages_block().
++ * Remove at a later date when no bug reports exist related to
++ * grouping pages by mobility
++ */
++ VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
++#endif
++
++ for (page = start_page; page <= end_page;) {
++ /* Make sure we are not inadvertently changing nodes */
++ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
++
++ if (!pfn_valid_within(page_to_pfn(page))) {
++ page++;
++ continue;
++ }
++
++ if (!PageBuddy(page)) {
++ page++;
++ continue;
++ }
++
++ order = page_order(page);
++ list_move(&page->lru,
++ &zone->free_area[order].free_list[migratetype]);
++ set_freepage_migratetype(page, migratetype);
++ page += 1 << order;
++ pages_moved += 1 << order;
++ }
++
++ return pages_moved;
++}
++
++int move_freepages_block(struct zone *zone, struct page *page,
++ int migratetype)
++{
++ unsigned long start_pfn, end_pfn;
++ struct page *start_page, *end_page;
++
++ start_pfn = page_to_pfn(page);
++ start_pfn = start_pfn & ~(pageblock_nr_pages-1);
++ start_page = pfn_to_page(start_pfn);
++ end_page = start_page + pageblock_nr_pages - 1;
++ end_pfn = start_pfn + pageblock_nr_pages - 1;
++
++ /* Do not cross zone boundaries */
++ if (!zone_spans_pfn(zone, start_pfn))
++ start_page = page;
++ if (!zone_spans_pfn(zone, end_pfn))
++ return 0;
++
++ return move_freepages(zone, start_page, end_page, migratetype);
++}
++
++static void change_pageblock_range(struct page *pageblock_page,
++ int start_order, int migratetype)
++{
++ int nr_pageblocks = 1 << (start_order - pageblock_order);
++
++ while (nr_pageblocks--) {
++ set_pageblock_migratetype(pageblock_page, migratetype);
++ pageblock_page += pageblock_nr_pages;
++ }
++}
++
++/*
++ * When we are falling back to another migratetype during allocation, try to
++ * steal extra free pages from the same pageblocks to satisfy further
++ * allocations, instead of polluting multiple pageblocks.
++ *
++ * If we are stealing a relatively large buddy page, it is likely there will
++ * be more free pages in the pageblock, so try to steal them all. For
++ * reclaimable and unmovable allocations, we steal regardless of page size,
++ * as fragmentation caused by those allocations polluting movable pageblocks
++ * is worse than movable allocations stealing from unmovable and reclaimable
++ * pageblocks.
++ */
++static bool can_steal_fallback(unsigned int order, int start_mt)
++{
++ /*
++ * Leaving this order check is intended, although there is
++ * relaxed order check in next check. The reason is that
++ * we can actually steal whole pageblock if this condition met,
++ * but, below check doesn't guarantee it and that is just heuristic
++ * so could be changed anytime.
++ */
++ if (order >= pageblock_order)
++ return true;
++
++ if (order >= pageblock_order / 2 ||
++ start_mt == MIGRATE_RECLAIMABLE ||
++ start_mt == MIGRATE_UNMOVABLE ||
++ page_group_by_mobility_disabled)
++ return true;
++
++ return false;
++}
++
++/*
++ * This function implements actual steal behaviour. If order is large enough,
++ * we can steal whole pageblock. If not, we first move freepages in this
++ * pageblock and check whether half of pages are moved or not. If half of
++ * pages are moved, we can change migratetype of pageblock and permanently
++ * use its pages as requested migratetype in the future.
++ */
++static void steal_suitable_fallback(struct zone *zone, struct page *page,
++ int start_type)
++{
++ int current_order = page_order(page);
++ int pages;
++
++ /* Take ownership for orders >= pageblock_order */
++ if (current_order >= pageblock_order) {
++ change_pageblock_range(page, current_order, start_type);
++ return;
++ }
++
++ pages = move_freepages_block(zone, page, start_type);
++
++ /* Claim the whole block if over half of it is free */
++ if (pages >= (1 << (pageblock_order-1)) ||
++ page_group_by_mobility_disabled)
++ set_pageblock_migratetype(page, start_type);
++}
++
++/*
++ * Check whether there is a suitable fallback freepage with requested order.
++ * If only_stealable is true, this function returns fallback_mt only if
++ * we can steal other freepages all together. This would help to reduce
++ * fragmentation due to mixed migratetype pages in one pageblock.
++ */
++int find_suitable_fallback(struct free_area *area, unsigned int order,
++ int migratetype, bool only_stealable, bool *can_steal)
++{
++ int i;
++ int fallback_mt;
++
++ if (area->nr_free == 0)
++ return -1;
++
++ *can_steal = false;
++ for (i = 0;; i++) {
++ fallback_mt = fallbacks[migratetype][i];
++ if (fallback_mt == MIGRATE_RESERVE)
++ break;
++
++ if (list_empty(&area->free_list[fallback_mt]))
++ continue;
++
++ if (can_steal_fallback(order, migratetype))
++ *can_steal = true;
++
++ if (!only_stealable)
++ return fallback_mt;
++
++ if (*can_steal)
++ return fallback_mt;
++ }
++
++ return -1;
++}
++
++/* Remove an element from the buddy allocator from the fallback list */
++static inline struct page *
++__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
++{
++ struct free_area *area;
++ unsigned int current_order;
++ struct page *page;
++ int fallback_mt;
++ bool can_steal;
++
++ /* Find the largest possible block of pages in the other list */
++ for (current_order = MAX_ORDER-1;
++ current_order >= order && current_order <= MAX_ORDER-1;
++ --current_order) {
++ area = &(zone->free_area[current_order]);
++ fallback_mt = find_suitable_fallback(area, current_order,
++ start_migratetype, false, &can_steal);
++ if (fallback_mt == -1)
++ continue;
++
++ page = list_entry(area->free_list[fallback_mt].next,
++ struct page, lru);
++ if (can_steal)
++ steal_suitable_fallback(zone, page, start_migratetype);
++
++ /* Remove the page from the freelists */
++ area->nr_free--;
++ list_del(&page->lru);
++ rmv_page_order(page);
++
++ expand(zone, page, order, current_order, area,
++ start_migratetype);
++ /*
++ * The freepage_migratetype may differ from pageblock's
++ * migratetype depending on the decisions in
++		 * steal_suitable_fallback(). This is OK as long as it
++ * does not differ for MIGRATE_CMA pageblocks. For CMA
++ * we need to make sure unallocated pages flushed from
++ * pcp lists are returned to the correct freelist.
++ */
++ set_freepage_migratetype(page, start_migratetype);
++
++ trace_mm_page_alloc_extfrag(page, order, current_order,
++ start_migratetype, fallback_mt);
++
++ return page;
++ }
++
++ return NULL;
++}
++
++/*
++ * Do the hard work of removing an element from the buddy allocator.
++ * Call me with the zone->lock already held.
++ */
++static struct page *__rmqueue(struct zone *zone, unsigned int order,
++ int migratetype)
++{
++ struct page *page;
++
++retry_reserve:
++ page = __rmqueue_smallest(zone, order, migratetype);
++
++ if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
++ if (migratetype == MIGRATE_MOVABLE)
++ page = __rmqueue_cma_fallback(zone, order);
++
++ if (!page)
++ page = __rmqueue_fallback(zone, order, migratetype);
++
++ /*
++ * Use MIGRATE_RESERVE rather than fail an allocation. goto
++ * is used because __rmqueue_smallest is an inline function
++ * and we want just one call site
++ */
++ if (!page) {
++ migratetype = MIGRATE_RESERVE;
++ goto retry_reserve;
++ }
++ }
++
++ trace_mm_page_alloc_zone_locked(page, order, migratetype);
++ return page;
++}
++
++/*
++ * Obtain a specified number of elements from the buddy allocator, all under
++ * a single hold of the lock, for efficiency. Add them to the supplied list.
++ * Returns the number of new pages which were placed at *list.
++ */
++static int rmqueue_bulk(struct zone *zone, unsigned int order,
++ unsigned long count, struct list_head *list,
++ int migratetype, bool cold)
++{
++ int i;
++
++ spin_lock(&zone->lock);
++ for (i = 0; i < count; ++i) {
++ struct page *page = __rmqueue(zone, order, migratetype);
++ if (unlikely(page == NULL))
++ break;
++
++ /*
++ * Split buddy pages returned by expand() are received here
++		 * in physical page order. The page is added to the caller's
++		 * list and the list head then moves forward. From the caller's
++		 * perspective, the linked list is ordered by page number under
++		 * some conditions. This is useful for IO devices that can
++ * merge IO requests if the physical pages are ordered
++ * properly.
++ */
++ if (likely(!cold))
++ list_add(&page->lru, list);
++ else
++ list_add_tail(&page->lru, list);
++ list = &page->lru;
++ if (is_migrate_cma(get_freepage_migratetype(page)))
++ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
++ -(1 << order));
++ }
++ __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
++ spin_unlock(&zone->lock);
++ return i;
++}
++
++#ifdef CONFIG_NUMA
++/*
++ * Called from the vmstat counter updater to drain pagesets of this
++ * currently executing processor on remote nodes after they have
++ * expired.
++ *
++ * Note that this function must be called with the thread pinned to
++ * a single processor.
++ */
++void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
++{
++ unsigned long flags;
++ int to_drain, batch;
++
++ local_irq_save(flags);
++ batch = READ_ONCE(pcp->batch);
++ to_drain = min(pcp->count, batch);
++ if (to_drain > 0) {
++ free_pcppages_bulk(zone, to_drain, pcp);
++ pcp->count -= to_drain;
++ }
++ local_irq_restore(flags);
++}
++#endif
++
++/*
++ * Drain pcplists of the indicated processor and zone.
++ *
++ * The processor must either be the current processor and the
++ * thread pinned to the current processor or a processor that
++ * is not online.
++ */
++static void drain_pages_zone(unsigned int cpu, struct zone *zone)
++{
++ unsigned long flags;
++ struct per_cpu_pageset *pset;
++ struct per_cpu_pages *pcp;
++
++ local_irq_save(flags);
++ pset = per_cpu_ptr(zone->pageset, cpu);
++
++ pcp = &pset->pcp;
++ if (pcp->count) {
++ free_pcppages_bulk(zone, pcp->count, pcp);
++ pcp->count = 0;
++ }
++ local_irq_restore(flags);
++}
++
++/*
++ * Drain pcplists of all zones on the indicated processor.
++ *
++ * The processor must either be the current processor and the
++ * thread pinned to the current processor or a processor that
++ * is not online.
++ */
++static void drain_pages(unsigned int cpu)
++{
++ struct zone *zone;
++
++ for_each_populated_zone(zone) {
++ drain_pages_zone(cpu, zone);
++ }
++}
++
++/*
++ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
++ *
++ * The CPU has to be pinned. When zone parameter is non-NULL, spill just
++ * the single zone's pages.
++ */
++void drain_local_pages(struct zone *zone)
++{
++ int cpu = smp_processor_id();
++
++ if (zone)
++ drain_pages_zone(cpu, zone);
++ else
++ drain_pages(cpu);
++}
++
++/*
++ * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
++ *
++ * When zone parameter is non-NULL, spill just the single zone's pages.
++ *
++ * Note that this code is protected against sending an IPI to an offline
++ * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
++ * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
++ * nothing keeps CPUs from showing up after we populated the cpumask and
++ * before the call to on_each_cpu_mask().
++ */
++void drain_all_pages(struct zone *zone)
++{
++ int cpu;
++
++ /*
++	 * Allocate in the BSS so we won't require allocation in
++ * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
++ */
++ static cpumask_t cpus_with_pcps;
++
++ /*
++ * We don't care about racing with CPU hotplug event
++ * as offline notification will cause the notified
++ * cpu to drain that CPU pcps and on_each_cpu_mask
++ * disables preemption as part of its processing
++ */
++ for_each_online_cpu(cpu) {
++ struct per_cpu_pageset *pcp;
++ struct zone *z;
++ bool has_pcps = false;
++
++ if (zone) {
++ pcp = per_cpu_ptr(zone->pageset, cpu);
++ if (pcp->pcp.count)
++ has_pcps = true;
++ } else {
++ for_each_populated_zone(z) {
++ pcp = per_cpu_ptr(z->pageset, cpu);
++ if (pcp->pcp.count) {
++ has_pcps = true;
++ break;
++ }
++ }
++ }
++
++ if (has_pcps)
++ cpumask_set_cpu(cpu, &cpus_with_pcps);
++ else
++ cpumask_clear_cpu(cpu, &cpus_with_pcps);
++ }
++ on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
++ zone, 1);
++}
++
++#ifdef CONFIG_HIBERNATION
++
++void mark_free_pages(struct zone *zone)
++{
++ unsigned long pfn, max_zone_pfn;
++ unsigned long flags;
++ unsigned int order, t;
++ struct list_head *curr;
++
++ if (zone_is_empty(zone))
++ return;
++
++ spin_lock_irqsave(&zone->lock, flags);
++
++ max_zone_pfn = zone_end_pfn(zone);
++ for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
++ if (pfn_valid(pfn)) {
++ struct page *page = pfn_to_page(pfn);
++
++ if (!swsusp_page_is_forbidden(page))
++ swsusp_unset_page_free(page);
++ }
++
++ for_each_migratetype_order(order, t) {
++ list_for_each(curr, &zone->free_area[order].free_list[t]) {
++ unsigned long i;
++
++ pfn = page_to_pfn(list_entry(curr, struct page, lru));
++ for (i = 0; i < (1UL << order); i++)
++ swsusp_set_page_free(pfn_to_page(pfn + i));
++ }
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++#endif /* CONFIG_PM */
++
++/*
++ * Free a 0-order page
++ * cold == true ? free a cold page : free a hot page
++ */
++void free_hot_cold_page(struct page *page, bool cold)
++{
++ struct zone *zone = page_zone(page);
++ struct per_cpu_pages *pcp;
++ unsigned long flags;
++ unsigned long pfn = page_to_pfn(page);
++ int migratetype;
++
++ if (!free_pages_prepare(page, 0))
++ return;
++
++ migratetype = get_pfnblock_migratetype(page, pfn);
++ set_freepage_migratetype(page, migratetype);
++ local_irq_save(flags);
++ __count_vm_event(PGFREE);
++
++ /*
++ * We only track unmovable, reclaimable and movable on pcp lists.
++ * Free ISOLATE pages back to the allocator because they are being
++ * offlined but treat RESERVE as movable pages so we can get those
++ * areas back if necessary. Otherwise, we may have to free
++ * excessively into the page allocator
++ */
++ if (migratetype >= MIGRATE_PCPTYPES) {
++ if (unlikely(is_migrate_isolate(migratetype))) {
++ free_one_page(zone, page, pfn, 0, migratetype);
++ goto out;
++ }
++ migratetype = MIGRATE_MOVABLE;
++ }
++
++ pcp = &this_cpu_ptr(zone->pageset)->pcp;
++ if (!cold)
++ list_add(&page->lru, &pcp->lists[migratetype]);
++ else
++ list_add_tail(&page->lru, &pcp->lists[migratetype]);
++ pcp->count++;
++ if (pcp->count >= pcp->high) {
++ unsigned long batch = READ_ONCE(pcp->batch);
++ free_pcppages_bulk(zone, batch, pcp);
++ pcp->count -= batch;
++ }
++
++out:
++ local_irq_restore(flags);
++}
++
++/*
++ * Free a list of 0-order pages
++ */
++void free_hot_cold_page_list(struct list_head *list, bool cold)
++{
++ struct page *page, *next;
++
++ list_for_each_entry_safe(page, next, list, lru) {
++ trace_mm_page_free_batched(page, cold);
++ free_hot_cold_page(page, cold);
++ }
++}
++
++/*
++ * split_page takes a non-compound higher-order page, and splits it into
++ * n (1<<order) sub-pages: page[0..n]
++ * Each sub-page must be freed individually.
++ *
++ * Note: this is probably too low level an operation for use in drivers.
++ * Please consult with lkml before using this in your driver.
++ */
++void split_page(struct page *page, unsigned int order)
++{
++ int i;
++
++ VM_BUG_ON_PAGE(PageCompound(page), page);
++ VM_BUG_ON_PAGE(!page_count(page), page);
++
++#ifdef CONFIG_KMEMCHECK
++ /*
++ * Split shadow pages too, because free(page[0]) would
++ * otherwise free the whole shadow.
++ */
++ if (kmemcheck_page_is_tracked(page))
++ split_page(virt_to_page(page[0].shadow), order);
++#endif
++
++ set_page_owner(page, 0, 0);
++ for (i = 1; i < (1 << order); i++) {
++ set_page_refcounted(page + i);
++ set_page_owner(page + i, 0, 0);
++ }
++}
++EXPORT_SYMBOL_GPL(split_page);
++
++int __isolate_free_page(struct page *page, unsigned int order)
++{
++ unsigned long watermark;
++ struct zone *zone;
++ int mt;
++
++ BUG_ON(!PageBuddy(page));
++
++ zone = page_zone(page);
++ mt = get_pageblock_migratetype(page);
++
++ if (!is_migrate_isolate(mt)) {
++ /* Obey watermarks as if the page was being allocated */
++ watermark = low_wmark_pages(zone) + (1 << order);
++ if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
++ return 0;
++
++ __mod_zone_freepage_state(zone, -(1UL << order), mt);
++ }
++
++ /* Remove page from free list */
++ list_del(&page->lru);
++ zone->free_area[order].nr_free--;
++ rmv_page_order(page);
++
++ /* Set the pageblock if the isolated page is at least a pageblock */
++ if (order >= pageblock_order - 1) {
++ struct page *endpage = page + (1 << order) - 1;
++ for (; page < endpage; page += pageblock_nr_pages) {
++ int mt = get_pageblock_migratetype(page);
++ if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
++ set_pageblock_migratetype(page,
++ MIGRATE_MOVABLE);
++ }
++ }
++
++ set_page_owner(page, order, 0);
++ return 1UL << order;
++}
++
++/*
++ * Similar to split_page except the page is already free. As this is only
++ * being used for migration, the migratetype of the block also changes.
++ * As this is called with interrupts disabled, the caller is responsible
++ * for calling arch_alloc_page() and kernel_map_pages() after interrupts
++ * are enabled.
++ *
++ * Note: this is probably too low level an operation for use in drivers.
++ * Please consult with lkml before using this in your driver.
++ */
++int split_free_page(struct page *page)
++{
++ unsigned int order;
++ int nr_pages;
++
++ order = page_order(page);
++
++ nr_pages = __isolate_free_page(page, order);
++ if (!nr_pages)
++ return 0;
++
++ /* Split into individual pages */
++ set_page_refcounted(page);
++ split_page(page, order);
++ return nr_pages;
++}
++
++/*
++ * Allocate a page from the given zone. Use pcplists for order-0 allocations.
++ */
++static inline
++struct page *buffered_rmqueue(struct zone *preferred_zone,
++ struct zone *zone, unsigned int order,
++ gfp_t gfp_flags, int migratetype)
++{
++ unsigned long flags;
++ struct page *page;
++ bool cold = ((gfp_flags & __GFP_COLD) != 0);
++
++ if (likely(order == 0)) {
++ struct per_cpu_pages *pcp;
++ struct list_head *list;
++
++ local_irq_save(flags);
++ pcp = &this_cpu_ptr(zone->pageset)->pcp;
++ list = &pcp->lists[migratetype];
++ if (list_empty(list)) {
++ pcp->count += rmqueue_bulk(zone, 0,
++ pcp->batch, list,
++ migratetype, cold);
++ if (unlikely(list_empty(list)))
++ goto failed;
++ }
++
++ if (cold)
++ page = list_entry(list->prev, struct page, lru);
++ else
++ page = list_entry(list->next, struct page, lru);
++
++ list_del(&page->lru);
++ pcp->count--;
++ } else {
++ if (unlikely(gfp_flags & __GFP_NOFAIL)) {
++ /*
++ * __GFP_NOFAIL is not to be used in new code.
++ *
++ * All __GFP_NOFAIL callers should be fixed so that they
++ * properly detect and handle allocation failures.
++ *
++ * We most definitely don't want callers attempting to
++ * allocate greater than order-1 page units with
++ * __GFP_NOFAIL.
++ */
++ WARN_ON_ONCE(order > 1);
++ }
++ spin_lock_irqsave(&zone->lock, flags);
++ page = __rmqueue(zone, order, migratetype);
++ spin_unlock(&zone->lock);
++ if (!page)
++ goto failed;
++ __mod_zone_freepage_state(zone, -(1 << order),
++ get_freepage_migratetype(page));
++ }
++
++ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
++ if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
++ !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
++ set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
++
++ __count_zone_vm_events(PGALLOC, zone, 1 << order);
++ zone_statistics(preferred_zone, zone, gfp_flags);
++ local_irq_restore(flags);
++
++ VM_BUG_ON_PAGE(bad_range(zone, page), page);
++ return page;
++
++failed:
++ local_irq_restore(flags);
++ return NULL;
++}
++
++#ifdef CONFIG_FAIL_PAGE_ALLOC
++
++static struct {
++ struct fault_attr attr;
++
++ u32 ignore_gfp_highmem;
++ u32 ignore_gfp_wait;
++ u32 min_order;
++} fail_page_alloc = {
++ .attr = FAULT_ATTR_INITIALIZER,
++ .ignore_gfp_wait = 1,
++ .ignore_gfp_highmem = 1,
++ .min_order = 1,
++};
++
++static int __init setup_fail_page_alloc(char *str)
++{
++ return setup_fault_attr(&fail_page_alloc.attr, str);
++}
++__setup("fail_page_alloc=", setup_fail_page_alloc);
++
++static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
++{
++ if (order < fail_page_alloc.min_order)
++ return false;
++ if (gfp_mask & __GFP_NOFAIL)
++ return false;
++ if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
++ return false;
++ if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
++ return false;
++
++ return should_fail(&fail_page_alloc.attr, 1 << order);
++}
++
++#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
++
++static int __init fail_page_alloc_debugfs(void)
++{
++ umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
++ struct dentry *dir;
++
++ dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
++ &fail_page_alloc.attr);
++ if (IS_ERR(dir))
++ return PTR_ERR(dir);
++
++ if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
++ &fail_page_alloc.ignore_gfp_wait))
++ goto fail;
++ if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
++ &fail_page_alloc.ignore_gfp_highmem))
++ goto fail;
++ if (!debugfs_create_u32("min-order", mode, dir,
++ &fail_page_alloc.min_order))
++ goto fail;
++
++ return 0;
++fail:
++ debugfs_remove_recursive(dir);
++
++ return -ENOMEM;
++}
++
++late_initcall(fail_page_alloc_debugfs);
++
++#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
++
++#else /* CONFIG_FAIL_PAGE_ALLOC */
++
++static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
++{
++ return false;
++}
++
++#endif /* CONFIG_FAIL_PAGE_ALLOC */
++
++/*
++ * Return true if free pages are above 'mark'. This takes into account the order
++ * of the allocation.
++ */
++static bool __zone_watermark_ok(struct zone *z, unsigned int order,
++ unsigned long mark, int classzone_idx, int alloc_flags,
++ long free_pages)
++{
++ /* free_pages may go negative - that's OK */
++ long min = mark;
++ int o;
++ long free_cma = 0;
++
++ free_pages -= (1 << order) - 1;
++ if (alloc_flags & ALLOC_HIGH)
++ min -= min / 2;
++ if (alloc_flags & ALLOC_HARDER)
++ min -= min / 4;
++#ifdef CONFIG_CMA
++ /* If allocation can't use CMA areas don't use free CMA pages */
++ if (!(alloc_flags & ALLOC_CMA))
++ free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
++#endif
++
++ if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
++ return false;
++ for (o = 0; o < order; o++) {
++ /* At the next order, this order's pages become unavailable */
++ free_pages -= z->free_area[o].nr_free << o;
++
++ /* Require fewer higher order pages to be free */
++ min >>= 1;
++
++ if (free_pages <= min)
++ return false;
++ }
++ return true;
++}
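++
++/*
++ * Worked example for __zone_watermark_ok() (illustrative numbers, CMA
++ * ignored): an order-1 request against mark == 1000 with ALLOC_HIGH halves
++ * min to 500, so the zone must have more than 500 + lowmem_reserve free
++ * pages overall, and after discounting the order-0 free pages it must
++ * still have more than 250 pages available in blocks of order >= 1.
++ */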
++
++bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
++ int classzone_idx, int alloc_flags)
++{
++ return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
++ zone_page_state(z, NR_FREE_PAGES));
++}
++
++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
++ unsigned long mark, int classzone_idx, int alloc_flags)
++{
++ long free_pages = zone_page_state(z, NR_FREE_PAGES);
++
++ if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
++ free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
++
++ return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
++ free_pages);
++}
++
++#ifdef CONFIG_NUMA
++/*
++ * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
++ * skip over zones that are not allowed by the cpuset, or that have
++ * been recently (in last second) found to be nearly full. See further
++ * comments in mmzone.h. Reduces cache footprint of zonelist scans
++ * that have to skip over a lot of full or unallowed zones.
++ *
++ * If the zonelist cache is present in the passed zonelist, then
++ * returns a pointer to the allowed node mask (either the current
++ * task's mems_allowed, or node_states[N_MEMORY].)
++ *
++ * If the zonelist cache is not available for this zonelist, does
++ * nothing and returns NULL.
++ *
++ * If the fullzones BITMAP in the zonelist cache is stale (more than
++ * a second since last zap'd) then we zap it out (clear its bits.)
++ *
++ * We hold off even calling zlc_setup, until after we've checked the
++ * first zone in the zonelist, on the theory that most allocations will
++ * be satisfied from that first zone, so best to examine that zone as
++ * quickly as we can.
++ */
++static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
++{
++ struct zonelist_cache *zlc; /* cached zonelist speedup info */
++ nodemask_t *allowednodes; /* zonelist_cache approximation */
++
++ zlc = zonelist->zlcache_ptr;
++ if (!zlc)
++ return NULL;
++
++ if (time_after(jiffies, zlc->last_full_zap + HZ)) {
++ bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
++ zlc->last_full_zap = jiffies;
++ }
++
++ allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
++ &cpuset_current_mems_allowed :
++ &node_states[N_MEMORY];
++ return allowednodes;
++}
++
++/*
++ * Given 'z' scanning a zonelist, run a couple of quick checks to see
++ * if it is worth looking at further for free memory:
++ * 1) Check that the zone isn't thought to be full (doesn't have its
++ * bit set in the zonelist_cache fullzones BITMAP).
++ * 2) Check that the zone's node (obtained from the zonelist_cache
++ * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
++ * Return true (non-zero) if zone is worth looking at further, or
++ * else return false (zero) if it is not.
++ *
++ * This check -ignores- the distinction between various watermarks,
++ * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
++ * found to be full for any variation of these watermarks, it will
++ * be considered full for up to one second by all requests, unless
++ * we are so low on memory on all allowed nodes that we are forced
++ * into the second scan of the zonelist.
++ *
++ * In the second scan we ignore this zonelist cache and exactly
++ * apply the watermarks to all zones, even if it is slower to do so.
++ * We are low on memory in the second scan, and should leave no stone
++ * unturned looking for a free page.
++ */
++static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
++ nodemask_t *allowednodes)
++{
++ struct zonelist_cache *zlc; /* cached zonelist speedup info */
++ int i; /* index of *z in zonelist zones */
++ int n; /* node that zone *z is on */
++
++ zlc = zonelist->zlcache_ptr;
++ if (!zlc)
++ return 1;
++
++ i = z - zonelist->_zonerefs;
++ n = zlc->z_to_n[i];
++
++ /* This zone is worth trying if it is allowed but not full */
++ return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
++}
++
++/*
++ * Given 'z' scanning a zonelist, set the corresponding bit in
++ * zlc->fullzones, so that subsequent attempts to allocate a page
++ * from that zone don't waste time re-examining it.
++ */
++static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
++{
++ struct zonelist_cache *zlc; /* cached zonelist speedup info */
++ int i; /* index of *z in zonelist zones */
++
++ zlc = zonelist->zlcache_ptr;
++ if (!zlc)
++ return;
++
++ i = z - zonelist->_zonerefs;
++
++ set_bit(i, zlc->fullzones);
++}
++
++/*
++ * clear all zones full, called after direct reclaim makes progress so that
++ * a zone that was recently full is not skipped over for up to a second
++ */
++static void zlc_clear_zones_full(struct zonelist *zonelist)
++{
++ struct zonelist_cache *zlc; /* cached zonelist speedup info */
++
++ zlc = zonelist->zlcache_ptr;
++ if (!zlc)
++ return;
++
++ bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
++}
++
++static bool zone_local(struct zone *local_zone, struct zone *zone)
++{
++ return local_zone->node == zone->node;
++}
++
++static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
++{
++ return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
++ RECLAIM_DISTANCE;
++}
++
++#else /* CONFIG_NUMA */
++
++static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
++{
++ return NULL;
++}
++
++static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
++ nodemask_t *allowednodes)
++{
++ return 1;
++}
++
++static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
++{
++}
++
++static void zlc_clear_zones_full(struct zonelist *zonelist)
++{
++}
++
++static bool zone_local(struct zone *local_zone, struct zone *zone)
++{
++ return true;
++}
++
++static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
++{
++ return true;
++}
++
++#endif /* CONFIG_NUMA */
++
++static void reset_alloc_batches(struct zone *preferred_zone)
++{
++ struct zone *zone = preferred_zone->zone_pgdat->node_zones;
++
++ do {
++ mod_zone_page_state(zone, NR_ALLOC_BATCH,
++ high_wmark_pages(zone) - low_wmark_pages(zone) -
++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
++ clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
++ } while (zone++ != preferred_zone);
++}
++
++/*
++ * get_page_from_freelist goes through the zonelist trying to allocate
++ * a page.
++ */
++static struct page *
++get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
++ const struct alloc_context *ac)
++{
++ struct zonelist *zonelist = ac->zonelist;
++ struct zoneref *z;
++ struct page *page = NULL;
++ struct zone *zone;
++ nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
++ int zlc_active = 0; /* set if using zonelist_cache */
++ int did_zlc_setup = 0; /* just call zlc_setup() one time */
++ bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
++ (gfp_mask & __GFP_WRITE);
++ int nr_fair_skipped = 0;
++ bool zonelist_rescan;
++
++zonelist_scan:
++ zonelist_rescan = false;
++
++ /*
++ * Scan zonelist, looking for a zone with enough free.
++ * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
++ */
++ for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
++ ac->nodemask) {
++ unsigned long mark;
++
++ if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
++ !zlc_zone_worth_trying(zonelist, z, allowednodes))
++ continue;
++ if (cpusets_enabled() &&
++ (alloc_flags & ALLOC_CPUSET) &&
++ !cpuset_zone_allowed(zone, gfp_mask))
++ continue;
++ /*
++ * Distribute pages in proportion to the individual
++ * zone size to ensure fair page aging. The zone a
++ * page was allocated in should have no effect on the
++ * time the page has in memory before being reclaimed.
++ */
++ if (alloc_flags & ALLOC_FAIR) {
++ if (!zone_local(ac->preferred_zone, zone))
++ break;
++ if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
++ nr_fair_skipped++;
++ continue;
++ }
++ }
++ /*
++ * When allocating a page cache page for writing, we
++ * want to get it from a zone that is within its dirty
++ * limit, such that no single zone holds more than its
++ * proportional share of globally allowed dirty pages.
++ * The dirty limits take into account the zone's
++ * lowmem reserves and high watermark so that kswapd
++ * should be able to balance it without having to
++ * write pages from its LRU list.
++ *
++ * This may look like it could increase pressure on
++ * lower zones by failing allocations in higher zones
++ * before they are full. But the pages that do spill
++ * over are limited as the lower zones are protected
++ * by this very same mechanism. It should not become
++ * a practical burden to them.
++ *
++ * XXX: For now, allow allocations to potentially
++ * exceed the per-zone dirty limit in the slowpath
++ * (ALLOC_WMARK_LOW unset) before going into reclaim,
++ * which is important when on a NUMA setup the allowed
++ * zones are together not big enough to reach the
++ * global limit. The proper fix for these situations
++ * will require awareness of zones in the
++ * dirty-throttling and the flusher threads.
++ */
++ if (consider_zone_dirty && !zone_dirty_ok(zone))
++ continue;
++
++ mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
++ if (!zone_watermark_ok(zone, order, mark,
++ ac->classzone_idx, alloc_flags)) {
++ int ret;
++
++ /* Checked here to keep the fast path fast */
++ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
++ if (alloc_flags & ALLOC_NO_WATERMARKS)
++ goto try_this_zone;
++
++ if (IS_ENABLED(CONFIG_NUMA) &&
++ !did_zlc_setup && nr_online_nodes > 1) {
++ /*
++ * we do zlc_setup if there are multiple nodes
++ * and before considering the first zone allowed
++ * by the cpuset.
++ */
++ allowednodes = zlc_setup(zonelist, alloc_flags);
++ zlc_active = 1;
++ did_zlc_setup = 1;
++ }
++
++ if (zone_reclaim_mode == 0 ||
++ !zone_allows_reclaim(ac->preferred_zone, zone))
++ goto this_zone_full;
++
++ /*
++ * As we may have just activated ZLC, check if the first
++ * eligible zone has failed zone_reclaim recently.
++ */
++ if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
++ !zlc_zone_worth_trying(zonelist, z, allowednodes))
++ continue;
++
++ ret = zone_reclaim(zone, gfp_mask, order);
++ switch (ret) {
++ case ZONE_RECLAIM_NOSCAN:
++ /* did not scan */
++ continue;
++ case ZONE_RECLAIM_FULL:
++ /* scanned but unreclaimable */
++ continue;
++ default:
++ /* did we reclaim enough */
++ if (zone_watermark_ok(zone, order, mark,
++ ac->classzone_idx, alloc_flags))
++ goto try_this_zone;
++
++ /*
++ * Failed to reclaim enough to meet watermark.
++ * Only mark the zone full if checking the min
++ * watermark or if we failed to reclaim just
++ * 1<<order pages or else the page allocator
++ * fastpath will prematurely mark zones full
++ * when the watermark is between the low and
++ * min watermarks.
++ */
++ if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
++ ret == ZONE_RECLAIM_SOME)
++ goto this_zone_full;
++
++ continue;
++ }
++ }
++
++try_this_zone:
++ page = buffered_rmqueue(ac->preferred_zone, zone, order,
++ gfp_mask, ac->migratetype);
++ if (page) {
++ if (prep_new_page(page, order, gfp_mask, alloc_flags))
++ goto try_this_zone;
++ return page;
++ }
++this_zone_full:
++ if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
++ zlc_mark_zone_full(zonelist, z);
++ }
++
++ /*
++ * The first pass makes sure allocations are spread fairly within the
++ * local node. However, the local node might have free pages left
++ * after the fairness batches are exhausted, and remote zones haven't
++ * even been considered yet. Try once more without fairness, and
++ * include remote zones now, before entering the slowpath and waking
++ * kswapd: prefer spilling to a remote zone over swapping locally.
++ */
++ if (alloc_flags & ALLOC_FAIR) {
++ alloc_flags &= ~ALLOC_FAIR;
++ if (nr_fair_skipped) {
++ zonelist_rescan = true;
++ reset_alloc_batches(ac->preferred_zone);
++ }
++ if (nr_online_nodes > 1)
++ zonelist_rescan = true;
++ }
++
++ if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
++ /* Disable zlc cache for second zonelist scan */
++ zlc_active = 0;
++ zonelist_rescan = true;
++ }
++
++ if (zonelist_rescan)
++ goto zonelist_scan;
++
++ return NULL;
++}
++
++/*
++ * Large machines with many possible nodes should not always dump per-node
++ * meminfo in irq context.
++ */
++static inline bool should_suppress_show_mem(void)
++{
++ bool ret = false;
++
++#if NODES_SHIFT > 8
++ ret = in_interrupt();
++#endif
++ return ret;
++}
++
++static DEFINE_RATELIMIT_STATE(nopage_rs,
++ DEFAULT_RATELIMIT_INTERVAL,
++ DEFAULT_RATELIMIT_BURST);
++
++void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
++{
++ unsigned int filter = SHOW_MEM_FILTER_NODES;
++
++ if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
++ debug_guardpage_minorder() > 0)
++ return;
++
++ /*
++ * This documents exceptions given to allocations in certain
++ * contexts that are allowed to allocate outside current's set
++ * of allowed nodes.
++ */
++ if (!(gfp_mask & __GFP_NOMEMALLOC))
++ if (test_thread_flag(TIF_MEMDIE) ||
++ (current->flags & (PF_MEMALLOC | PF_EXITING)))
++ filter &= ~SHOW_MEM_FILTER_NODES;
++ if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
++ filter &= ~SHOW_MEM_FILTER_NODES;
++
++ if (fmt) {
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, fmt);
++
++ vaf.fmt = fmt;
++ vaf.va = &args;
++
++ pr_warn("%pV", &vaf);
++
++ va_end(args);
++ }
++
++ pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
++ current->comm, order, gfp_mask);
++
++ dump_stack();
++ if (!should_suppress_show_mem())
++ show_mem(filter);
++}
++
++static inline int
++should_alloc_retry(gfp_t gfp_mask, unsigned int order,
++ unsigned long did_some_progress,
++ unsigned long pages_reclaimed)
++{
++ /* Do not loop if specifically requested */
++ if (gfp_mask & __GFP_NORETRY)
++ return 0;
++
++ /* Always retry if specifically requested */
++ if (gfp_mask & __GFP_NOFAIL)
++ return 1;
++
++ /*
++ * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
++ * making forward progress without invoking OOM. Suspend also disables
++ * storage devices so kswapd will not help. Bail if we are suspending.
++ */
++ if (!did_some_progress && pm_suspended_storage())
++ return 0;
++
++ /*
++ * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
++ * means __GFP_NOFAIL, but that may not be true in other
++ * implementations.
++ */
++ if (order <= PAGE_ALLOC_COSTLY_ORDER)
++ return 1;
++
++ /*
++ * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
++ * specified, then we retry until we no longer reclaim any pages
++ * (above), or we've reclaimed an order of pages at least as
++ * large as the allocation's order. In both cases, if the
++ * allocation still fails, we stop retrying.
++ */
++ if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
++ return 1;
++
++ return 0;
++}
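++
++/*
++ * Example for should_alloc_retry(): absent __GFP_NORETRY and __GFP_NOFAIL,
++ * an order-4 request with __GFP_REPEAT keeps being retried until at least
++ * 1 << 4 = 16 pages have been reclaimed in total, while a request of order
++ * <= PAGE_ALLOC_COSTLY_ORDER is retried unconditionally (except when
++ * suspending with no reclaim progress).
++ */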
++
++static inline struct page *
++__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
++ const struct alloc_context *ac, unsigned long *did_some_progress)
++{
++ struct page *page;
++
++ *did_some_progress = 0;
++
++ /*
++ * Acquire the per-zone oom lock for each zone. If that
++ * fails, somebody else is making progress for us.
++ */
++ if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) {
++ *did_some_progress = 1;
++ schedule_timeout_uninterruptible(1);
++ return NULL;
++ }
++
++ /*
++ * Go through the zonelist yet one more time, keep very high watermark
++ * here, this is only to catch a parallel oom killing, we must fail if
++ * we're still under heavy pressure.
++ */
++ page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
++ ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
++ if (page)
++ goto out;
++
++ if (!(gfp_mask & __GFP_NOFAIL)) {
++ /* Coredumps can quickly deplete all memory reserves */
++ if (current->flags & PF_DUMPCORE)
++ goto out;
++ /* The OOM killer will not help higher order allocs */
++ if (order > PAGE_ALLOC_COSTLY_ORDER)
++ goto out;
++ /* The OOM killer does not needlessly kill tasks for lowmem */
++ if (ac->high_zoneidx < ZONE_NORMAL)
++ goto out;
++ /* The OOM killer does not compensate for light reclaim */
++ if (!(gfp_mask & __GFP_FS)) {
++ /*
++ * XXX: Page reclaim didn't yield anything,
++ * and the OOM killer can't be invoked, but
++ * keep looping as per should_alloc_retry().
++ */
++ *did_some_progress = 1;
++ goto out;
++ }
++ /* The OOM killer may not free memory on a specific node */
++ if (gfp_mask & __GFP_THISNODE)
++ goto out;
++ }
++ /* Exhausted what can be done so it's blamo time */
++ if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
++ || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
++ *did_some_progress = 1;
++out:
++ oom_zonelist_unlock(ac->zonelist, gfp_mask);
++ return page;
++}
++
++#ifdef CONFIG_COMPACTION
++/* Try memory compaction for high-order allocations before reclaim */
++static struct page *
++__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
++ int alloc_flags, const struct alloc_context *ac,
++ enum migrate_mode mode, int *contended_compaction,
++ bool *deferred_compaction)
++{
++ unsigned long compact_result;
++ struct page *page;
++
++ if (!order)
++ return NULL;
++
++ current->flags |= PF_MEMALLOC;
++ compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
++ mode, contended_compaction);
++ current->flags &= ~PF_MEMALLOC;
++
++ switch (compact_result) {
++ case COMPACT_DEFERRED:
++ *deferred_compaction = true;
++ /* fall-through */
++ case COMPACT_SKIPPED:
++ return NULL;
++ default:
++ break;
++ }
++
++ /*
++ * At least in one zone compaction wasn't deferred or skipped, so let's
++ * count a compaction stall
++ */
++ count_vm_event(COMPACTSTALL);
++
++ page = get_page_from_freelist(gfp_mask, order,
++ alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
++
++ if (page) {
++ struct zone *zone = page_zone(page);
++
++ zone->compact_blockskip_flush = false;
++ compaction_defer_reset(zone, order, true);
++ count_vm_event(COMPACTSUCCESS);
++ return page;
++ }
++
++ /*
++ * It's bad if compaction run occurs and fails. The most likely reason
++ * is that pages exist, but not enough to satisfy watermarks.
++ */
++ count_vm_event(COMPACTFAIL);
++
++ cond_resched();
++
++ return NULL;
++}
++#else
++static inline struct page *
++__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
++ int alloc_flags, const struct alloc_context *ac,
++ enum migrate_mode mode, int *contended_compaction,
++ bool *deferred_compaction)
++{
++ return NULL;
++}
++#endif /* CONFIG_COMPACTION */
++
++/* Perform direct synchronous page reclaim */
++static int
++__perform_reclaim(gfp_t gfp_mask, unsigned int order,
++ const struct alloc_context *ac)
++{
++ struct reclaim_state reclaim_state;
++ int progress;
++
++ cond_resched();
++
++ /* We now go into synchronous reclaim */
++ cpuset_memory_pressure_bump();
++ current->flags |= PF_MEMALLOC;
++ lockdep_set_current_reclaim_state(gfp_mask);
++ reclaim_state.reclaimed_slab = 0;
++ current->reclaim_state = &reclaim_state;
++
++ progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
++ ac->nodemask);
++
++ current->reclaim_state = NULL;
++ lockdep_clear_current_reclaim_state();
++ current->flags &= ~PF_MEMALLOC;
++
++ cond_resched();
++
++ return progress;
++}
++
++/* The really slow allocator path where we enter direct reclaim */
++static inline struct page *
++__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
++ int alloc_flags, const struct alloc_context *ac,
++ unsigned long *did_some_progress)
++{
++ struct page *page = NULL;
++ bool drained = false;
++
++ *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
++ if (unlikely(!(*did_some_progress)))
++ return NULL;
++
++ /* After successful reclaim, reconsider all zones for allocation */
++ if (IS_ENABLED(CONFIG_NUMA))
++ zlc_clear_zones_full(ac->zonelist);
++
++retry:
++ page = get_page_from_freelist(gfp_mask, order,
++ alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
++
++ /*
++ * If an allocation failed after direct reclaim, it could be because
++ * pages are pinned on the per-cpu lists. Drain them and try again
++ */
++ if (!page && !drained) {
++ drain_all_pages(NULL);
++ drained = true;
++ goto retry;
++ }
++
++ return page;
++}
++
++/*
++ * This is called in the allocator slow-path if the allocation request is of
++ * sufficient urgency to ignore watermarks and take other desperate measures
++ */
++static inline struct page *
++__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
++ const struct alloc_context *ac)
++{
++ struct page *page;
++
++ do {
++ page = get_page_from_freelist(gfp_mask, order,
++ ALLOC_NO_WATERMARKS, ac);
++
++ if (!page && gfp_mask & __GFP_NOFAIL)
++ wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
++ HZ/50);
++ } while (!page && (gfp_mask & __GFP_NOFAIL));
++
++ return page;
++}
++
++static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
++{
++ struct zoneref *z;
++ struct zone *zone;
++
++ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
++ ac->high_zoneidx, ac->nodemask)
++ wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
++}
++
++static inline int
++gfp_to_alloc_flags(gfp_t gfp_mask)
++{
++ int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
++ const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
++
++ /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
++ BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
++
++ /*
++ * The caller may dip into page reserves a bit more if the caller
++ * cannot run direct reclaim, or if the caller has realtime scheduling
++ * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
++ * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
++ */
++ alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
++
++ if (atomic) {
++ /*
++ * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
++ * if it can't schedule.
++ */
++ if (!(gfp_mask & __GFP_NOMEMALLOC))
++ alloc_flags |= ALLOC_HARDER;
++ /*
++ * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
++ * comment for __cpuset_node_allowed().
++ */
++ alloc_flags &= ~ALLOC_CPUSET;
++ } else if (unlikely(rt_task(current)) && !in_interrupt())
++ alloc_flags |= ALLOC_HARDER;
++
++ if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
++ if (gfp_mask & __GFP_MEMALLOC)
++ alloc_flags |= ALLOC_NO_WATERMARKS;
++ else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
++ alloc_flags |= ALLOC_NO_WATERMARKS;
++ else if (!in_interrupt() &&
++ ((current->flags & PF_MEMALLOC) ||
++ unlikely(test_thread_flag(TIF_MEMDIE))))
++ alloc_flags |= ALLOC_NO_WATERMARKS;
++ }
++#ifdef CONFIG_CMA
++ if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
++ alloc_flags |= ALLOC_CMA;
++#endif
++ return alloc_flags;
++}
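++
++/*
++ * Example: in gfp_to_alloc_flags(), a plain GFP_ATOMIC request (__GFP_HIGH
++ * set, __GFP_WAIT clear) is treated as atomic, so it ends up with
++ * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER and with ALLOC_CPUSET
++ * cleared, letting it dip further into the reserves.
++ */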
++
++bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
++{
++ return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
++}
++
++static inline struct page *
++__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
++ struct alloc_context *ac)
++{
++ const gfp_t wait = gfp_mask & __GFP_WAIT;
++ struct page *page = NULL;
++ int alloc_flags;
++ unsigned long pages_reclaimed = 0;
++ unsigned long did_some_progress;
++ enum migrate_mode migration_mode = MIGRATE_ASYNC;
++ bool deferred_compaction = false;
++ int contended_compaction = COMPACT_CONTENDED_NONE;
++
++ /*
++ * In the slowpath, we sanity check order to avoid ever trying to
++ * reclaim >= MAX_ORDER areas which will never succeed. Callers may
++ * be using allocators in order of preference for an area that is
++ * too large.
++ */
++ if (order >= MAX_ORDER) {
++ WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
++ return NULL;
++ }
++
++ /*
++ * If this allocation cannot block and it is for a specific node, then
++ * fail early. There's no need to wakeup kswapd or retry for a
++ * speculative node-specific allocation.
++ */
++ if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait)
++ goto nopage;
++
++retry:
++ if (!(gfp_mask & __GFP_NO_KSWAPD))
++ wake_all_kswapds(order, ac);
++
++ /*
++ * OK, we're below the kswapd watermark and have kicked background
++ * reclaim. Now things get more complex, so set up alloc_flags according
++ * to how we want to proceed.
++ */
++ alloc_flags = gfp_to_alloc_flags(gfp_mask);
++
++ /*
++ * Find the true preferred zone if the allocation is unconstrained by
++ * cpusets.
++ */
++ if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
++ struct zoneref *preferred_zoneref;
++ preferred_zoneref = first_zones_zonelist(ac->zonelist,
++ ac->high_zoneidx, NULL, &ac->preferred_zone);
++ ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
++ }
++
++ /* This is the last chance, in general, before the goto nopage. */
++ page = get_page_from_freelist(gfp_mask, order,
++ alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
++ if (page)
++ goto got_pg;
++
++ /* Allocate without watermarks if the context allows */
++ if (alloc_flags & ALLOC_NO_WATERMARKS) {
++ /*
++ * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
++		 * the allocation is high priority and these types of
++		 * allocations are system rather than user oriented
++ */
++ ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
++
++ page = __alloc_pages_high_priority(gfp_mask, order, ac);
++
++ if (page) {
++ goto got_pg;
++ }
++ }
++
++ /* Atomic allocations - we can't balance anything */
++ if (!wait) {
++ /*
++ * All existing users of the deprecated __GFP_NOFAIL are
++ * blockable, so warn of any new users that actually allow this
++ * type of allocation to fail.
++ */
++ WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
++ goto nopage;
++ }
++
++ /* Avoid recursion of direct reclaim */
++ if (current->flags & PF_MEMALLOC)
++ goto nopage;
++
++ /* Avoid allocations with no watermarks from looping endlessly */
++ if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
++ goto nopage;
++
++ /*
++ * Try direct compaction. The first pass is asynchronous. Subsequent
++ * attempts after direct reclaim are synchronous
++ */
++ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
++ migration_mode,
++ &contended_compaction,
++ &deferred_compaction);
++ if (page)
++ goto got_pg;
++
++ /* Checks for THP-specific high-order allocations */
++ if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
++ /*
++ * If compaction is deferred for high-order allocations, it is
++ * because sync compaction recently failed. If this is the case
++ * and the caller requested a THP allocation, we do not want
++ * to heavily disrupt the system, so we fail the allocation
++ * instead of entering direct reclaim.
++ */
++ if (deferred_compaction)
++ goto nopage;
++
++ /*
++ * In all zones where compaction was attempted (and not
++ * deferred or skipped), lock contention has been detected.
++ * For THP allocation we do not want to disrupt the others
++ * so we fallback to base pages instead.
++ */
++ if (contended_compaction == COMPACT_CONTENDED_LOCK)
++ goto nopage;
++
++ /*
++ * If compaction was aborted due to need_resched(), we do not
++ * want to further increase allocation latency, unless it is
++ * khugepaged trying to collapse.
++ */
++ if (contended_compaction == COMPACT_CONTENDED_SCHED
++ && !(current->flags & PF_KTHREAD))
++ goto nopage;
++ }
++
++ /*
++ * It can become very expensive to allocate transparent hugepages at
++ * fault, so use asynchronous memory compaction for THP unless it is
++ * khugepaged trying to collapse.
++ */
++ if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
++ (current->flags & PF_KTHREAD))
++ migration_mode = MIGRATE_SYNC_LIGHT;
++
++ /* Try direct reclaim and then allocating */
++ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
++ &did_some_progress);
++ if (page)
++ goto got_pg;
++
++ /* Check if we should retry the allocation */
++ pages_reclaimed += did_some_progress;
++ if (should_alloc_retry(gfp_mask, order, did_some_progress,
++ pages_reclaimed)) {
++ /*
++ * If we fail to make progress by freeing individual
++ * pages, but the allocation wants us to keep going,
++ * start OOM killing tasks.
++ */
++ if (!did_some_progress) {
++ page = __alloc_pages_may_oom(gfp_mask, order, ac,
++ &did_some_progress);
++ if (page)
++ goto got_pg;
++ if (!did_some_progress)
++ goto nopage;
++ }
++ /* Wait for some write requests to complete then retry */
++ wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
++ goto retry;
++ } else {
++ /*
++ * High-order allocations do not necessarily loop after
++ * direct reclaim and reclaim/compaction depends on compaction
++ * being called after reclaim so call directly if necessary
++ */
++ page = __alloc_pages_direct_compact(gfp_mask, order,
++ alloc_flags, ac, migration_mode,
++ &contended_compaction,
++ &deferred_compaction);
++ if (page)
++ goto got_pg;
++ }
++
++nopage:
++ warn_alloc_failed(gfp_mask, order, NULL);
++got_pg:
++ return page;
++}
++
++/*
++ * This is the 'heart' of the zoned buddy allocator.
++ */
++struct page *
++__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
++ struct zonelist *zonelist, nodemask_t *nodemask)
++{
++ struct zoneref *preferred_zoneref;
++ struct page *page = NULL;
++ unsigned int cpuset_mems_cookie;
++ int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
++ gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
++ struct alloc_context ac = {
++ .high_zoneidx = gfp_zone(gfp_mask),
++ .nodemask = nodemask,
++ .migratetype = gfpflags_to_migratetype(gfp_mask),
++ };
++
++ gfp_mask &= gfp_allowed_mask;
++
++ lockdep_trace_alloc(gfp_mask);
++
++ might_sleep_if(gfp_mask & __GFP_WAIT);
++
++ if (should_fail_alloc_page(gfp_mask, order))
++ return NULL;
++
++ /*
++ * Check the zones suitable for the gfp_mask contain at least one
++ * valid zone. It's possible to have an empty zonelist as a result
++ * of __GFP_THISNODE and a memoryless node
++ */
++ if (unlikely(!zonelist->_zonerefs->zone))
++ return NULL;
++
++ if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
++ alloc_flags |= ALLOC_CMA;
++
++retry_cpuset:
++ cpuset_mems_cookie = read_mems_allowed_begin();
++
++ /* We set it here, as __alloc_pages_slowpath might have changed it */
++ ac.zonelist = zonelist;
++ /* The preferred zone is used for statistics later */
++ preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
++ ac.nodemask ? : &cpuset_current_mems_allowed,
++ &ac.preferred_zone);
++ if (!ac.preferred_zone)
++ goto out;
++ ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
++
++ /* First allocation attempt */
++ alloc_mask = gfp_mask|__GFP_HARDWALL;
++ page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
++ if (unlikely(!page)) {
++ /*
++ * Runtime PM, block IO and its error handling path
++ * can deadlock because I/O on the device might not
++ * complete.
++ */
++ alloc_mask = memalloc_noio_flags(gfp_mask);
++
++ page = __alloc_pages_slowpath(alloc_mask, order, &ac);
++ }
++
++ if (kmemcheck_enabled && page)
++ kmemcheck_pagealloc_alloc(page, order, gfp_mask);
++
++ trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
++
++out:
++ /*
++ * When updating a task's mems_allowed, it is possible to race with
++ * parallel threads in such a way that an allocation can fail while
++ * the mask is being updated. If a page allocation is about to fail,
++ * check if the cpuset changed during allocation and if so, retry.
++ */
++ if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
++ goto retry_cpuset;
++
++ return page;
++}
++EXPORT_SYMBOL(__alloc_pages_nodemask);
++
++/*
++ * Common helper functions.
++ */
++unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
++{
++ struct page *page;
++
++ /*
++ * __get_free_pages() returns a 32-bit address, which cannot represent
++ * a highmem page
++ */
++ VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
++
++ page = alloc_pages(gfp_mask, order);
++ if (!page)
++ return 0;
++ return (unsigned long) page_address(page);
++}
++EXPORT_SYMBOL(__get_free_pages);
++
++unsigned long get_zeroed_page(gfp_t gfp_mask)
++{
++ return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
++}
++EXPORT_SYMBOL(get_zeroed_page);
++
++void __free_pages(struct page *page, unsigned int order)
++{
++ if (put_page_testzero(page)) {
++ if (order == 0)
++ free_hot_cold_page(page, false);
++ else
++ __free_pages_ok(page, order);
++ }
++}
++
++EXPORT_SYMBOL(__free_pages);
++
++void free_pages(unsigned long addr, unsigned int order)
++{
++ if (addr != 0) {
++ VM_BUG_ON(!virt_addr_valid((void *)addr));
++ __free_pages(virt_to_page((void *)addr), order);
++ }
++}
++
++EXPORT_SYMBOL(free_pages);
++
++/*
++ * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
++ * of the current memory cgroup.
++ *
++ * It should be used when the caller would like to use kmalloc, but since the
++ * allocation is large, it has to fall back to the page allocator.
++ */
++struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
++{
++ struct page *page;
++ struct mem_cgroup *memcg = NULL;
++
++ if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
++ return NULL;
++ page = alloc_pages(gfp_mask, order);
++ memcg_kmem_commit_charge(page, memcg, order);
++ return page;
++}
++
++struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
++{
++ struct page *page;
++ struct mem_cgroup *memcg = NULL;
++
++ if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
++ return NULL;
++ page = alloc_pages_node(nid, gfp_mask, order);
++ memcg_kmem_commit_charge(page, memcg, order);
++ return page;
++}
++
++/*
++ * __free_kmem_pages and free_kmem_pages will free pages allocated with
++ * alloc_kmem_pages.
++ */
++void __free_kmem_pages(struct page *page, unsigned int order)
++{
++ memcg_kmem_uncharge_pages(page, order);
++ __free_pages(page, order);
++}
++
++void free_kmem_pages(unsigned long addr, unsigned int order)
++{
++ if (addr != 0) {
++ VM_BUG_ON(!virt_addr_valid((void *)addr));
++ __free_kmem_pages(virt_to_page((void *)addr), order);
++ }
++}
++
++static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
++{
++ if (addr) {
++ unsigned long alloc_end = addr + (PAGE_SIZE << order);
++ unsigned long used = addr + PAGE_ALIGN(size);
++
++ split_page(virt_to_page((void *)addr), order);
++ while (used < alloc_end) {
++ free_page(used);
++ used += PAGE_SIZE;
++ }
++ }
++ return (void *)addr;
++}
++
++/**
++ * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
++ * @size: the number of bytes to allocate
++ * @gfp_mask: GFP flags for the allocation
++ *
++ * This function is similar to alloc_pages(), except that it allocates the
++ * minimum number of pages to satisfy the request. alloc_pages() can only
++ * allocate memory in power-of-two pages.
++ *
++ * This function is also limited by MAX_ORDER.
++ *
++ * Memory allocated by this function must be released by free_pages_exact().
++ */
++void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
++{
++ unsigned int order = get_order(size);
++ unsigned long addr;
++
++ addr = __get_free_pages(gfp_mask, order);
++ return make_alloc_exact(addr, order, size);
++}
++EXPORT_SYMBOL(alloc_pages_exact);
++
++/**
++ * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
++ * pages on a node.
++ * @nid: the preferred node ID where memory should be allocated
++ * @size: the number of bytes to allocate
++ * @gfp_mask: GFP flags for the allocation
++ *
++ * Like alloc_pages_exact(), but try to allocate on node nid first before falling
++ * back.
++ * Note this is not alloc_pages_exact_node() which allocates on a specific node,
++ * but is not exact.
++ */
++void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
++{
++ unsigned order = get_order(size);
++ struct page *p = alloc_pages_node(nid, gfp_mask, order);
++ if (!p)
++ return NULL;
++ return make_alloc_exact((unsigned long)page_address(p), order, size);
++}
++
++/**
++ * free_pages_exact - release memory allocated via alloc_pages_exact()
++ * @virt: the value returned by alloc_pages_exact.
++ * @size: size of allocation, same value as passed to alloc_pages_exact().
++ *
++ * Release the memory allocated by a previous call to alloc_pages_exact.
++ */
++void free_pages_exact(void *virt, size_t size)
++{
++ unsigned long addr = (unsigned long)virt;
++ unsigned long end = addr + PAGE_ALIGN(size);
++
++ while (addr < end) {
++ free_page(addr);
++ addr += PAGE_SIZE;
++ }
++}
++EXPORT_SYMBOL(free_pages_exact);
++
++/**
++ * nr_free_zone_pages - count number of pages beyond high watermark
++ * @offset: The zone index of the highest zone
++ *
++ * nr_free_zone_pages() counts the number of pages which are beyond the
++ * high watermark within all zones at or below a given zone index. For each
++ * zone, the number of pages is calculated as:
++ * managed_pages - high_pages
++ */
++static unsigned long nr_free_zone_pages(int offset)
++{
++ struct zoneref *z;
++ struct zone *zone;
++
++ /* Just pick one node, since fallback list is circular */
++ unsigned long sum = 0;
++
++ struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
++
++ for_each_zone_zonelist(zone, z, zonelist, offset) {
++ unsigned long size = zone->managed_pages;
++ unsigned long high = high_wmark_pages(zone);
++ if (size > high)
++ sum += size - high;
++ }
++
++ return sum;
++}
++
++/**
++ * nr_free_buffer_pages - count number of pages beyond high watermark
++ *
++ * nr_free_buffer_pages() counts the number of pages which are beyond the high
++ * watermark within ZONE_DMA and ZONE_NORMAL.
++ */
++unsigned long nr_free_buffer_pages(void)
++{
++ return nr_free_zone_pages(gfp_zone(GFP_USER));
++}
++EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
++
++/**
++ * nr_free_pagecache_pages - count number of pages beyond high watermark
++ *
++ * nr_free_pagecache_pages() counts the number of pages which are beyond the
++ * high watermark within all zones.
++ */
++unsigned long nr_free_pagecache_pages(void)
++{
++ return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
++}
++
++static inline void show_node(struct zone *zone)
++{
++ if (IS_ENABLED(CONFIG_NUMA))
++ printk("Node %d ", zone_to_nid(zone));
++}
++
++void si_meminfo(struct sysinfo *val)
++{
++ val->totalram = totalram_pages;
++ val->sharedram = global_page_state(NR_SHMEM);
++ val->freeram = global_page_state(NR_FREE_PAGES);
++ val->bufferram = nr_blockdev_pages();
++ val->totalhigh = totalhigh_pages;
++ val->freehigh = nr_free_highpages();
++ val->mem_unit = PAGE_SIZE;
++}
++
++EXPORT_SYMBOL(si_meminfo);
++
++#ifdef CONFIG_NUMA
++void si_meminfo_node(struct sysinfo *val, int nid)
++{
++ int zone_type; /* needs to be signed */
++ unsigned long managed_pages = 0;
++ pg_data_t *pgdat = NODE_DATA(nid);
++
++ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
++ managed_pages += pgdat->node_zones[zone_type].managed_pages;
++ val->totalram = managed_pages;
++ val->sharedram = node_page_state(nid, NR_SHMEM);
++ val->freeram = node_page_state(nid, NR_FREE_PAGES);
++#ifdef CONFIG_HIGHMEM
++ val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
++ val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
++ NR_FREE_PAGES);
++#else
++ val->totalhigh = 0;
++ val->freehigh = 0;
++#endif
++ val->mem_unit = PAGE_SIZE;
++}
++#endif
++
++/*
++ * Determine whether the node should be displayed or not, depending on whether
++ * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
++ */
++bool skip_free_areas_node(unsigned int flags, int nid)
++{
++ bool ret = false;
++ unsigned int cpuset_mems_cookie;
++
++ if (!(flags & SHOW_MEM_FILTER_NODES))
++ goto out;
++
++ do {
++ cpuset_mems_cookie = read_mems_allowed_begin();
++ ret = !node_isset(nid, cpuset_current_mems_allowed);
++ } while (read_mems_allowed_retry(cpuset_mems_cookie));
++out:
++ return ret;
++}
++
++#define K(x) ((x) << (PAGE_SHIFT-10))
++
++static void show_migration_types(unsigned char type)
++{
++ static const char types[MIGRATE_TYPES] = {
++ [MIGRATE_UNMOVABLE] = 'U',
++ [MIGRATE_RECLAIMABLE] = 'E',
++ [MIGRATE_MOVABLE] = 'M',
++ [MIGRATE_RESERVE] = 'R',
++#ifdef CONFIG_CMA
++ [MIGRATE_CMA] = 'C',
++#endif
++#ifdef CONFIG_MEMORY_ISOLATION
++ [MIGRATE_ISOLATE] = 'I',
++#endif
++ };
++ char tmp[MIGRATE_TYPES + 1];
++ char *p = tmp;
++ int i;
++
++ for (i = 0; i < MIGRATE_TYPES; i++) {
++ if (type & (1 << i))
++ *p++ = types[i];
++ }
++
++ *p = '\0';
++ printk("(%s) ", tmp);
++}
++
++/*
++ * Show free area list (used inside shift_scroll-lock stuff)
++ * We also calculate the percentage fragmentation. We do this by counting the
++ * memory on each free list with the exception of the first item on the list.
++ *
++ * Bits in @filter:
++ * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
++ * cpuset.
++ */
++void show_free_areas(unsigned int filter)
++{
++ unsigned long free_pcp = 0;
++ int cpu;
++ struct zone *zone;
++
++ for_each_populated_zone(zone) {
++ if (skip_free_areas_node(filter, zone_to_nid(zone)))
++ continue;
++
++ for_each_online_cpu(cpu)
++ free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
++ }
++
++ printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
++ " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
++ " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
++ " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
++ " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
++ " free:%lu free_pcp:%lu free_cma:%lu\n",
++ global_page_state(NR_ACTIVE_ANON),
++ global_page_state(NR_INACTIVE_ANON),
++ global_page_state(NR_ISOLATED_ANON),
++ global_page_state(NR_ACTIVE_FILE),
++ global_page_state(NR_INACTIVE_FILE),
++ global_page_state(NR_ISOLATED_FILE),
++ global_page_state(NR_UNEVICTABLE),
++ global_page_state(NR_FILE_DIRTY),
++ global_page_state(NR_WRITEBACK),
++ global_page_state(NR_UNSTABLE_NFS),
++ global_page_state(NR_SLAB_RECLAIMABLE),
++ global_page_state(NR_SLAB_UNRECLAIMABLE),
++ global_page_state(NR_FILE_MAPPED),
++ global_page_state(NR_SHMEM),
++ global_page_state(NR_PAGETABLE),
++ global_page_state(NR_BOUNCE),
++ global_page_state(NR_FREE_PAGES),
++ free_pcp,
++ global_page_state(NR_FREE_CMA_PAGES));
++
++ for_each_populated_zone(zone) {
++ int i;
++
++ if (skip_free_areas_node(filter, zone_to_nid(zone)))
++ continue;
++
++ free_pcp = 0;
++ for_each_online_cpu(cpu)
++ free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
++
++ show_node(zone);
++ printk("%s"
++ " free:%lukB"
++ " min:%lukB"
++ " low:%lukB"
++ " high:%lukB"
++ " active_anon:%lukB"
++ " inactive_anon:%lukB"
++ " active_file:%lukB"
++ " inactive_file:%lukB"
++ " unevictable:%lukB"
++ " isolated(anon):%lukB"
++ " isolated(file):%lukB"
++ " present:%lukB"
++ " managed:%lukB"
++ " mlocked:%lukB"
++ " dirty:%lukB"
++ " writeback:%lukB"
++ " mapped:%lukB"
++ " shmem:%lukB"
++ " slab_reclaimable:%lukB"
++ " slab_unreclaimable:%lukB"
++ " kernel_stack:%lukB"
++ " pagetables:%lukB"
++ " unstable:%lukB"
++ " bounce:%lukB"
++ " free_pcp:%lukB"
++ " local_pcp:%ukB"
++ " free_cma:%lukB"
++ " writeback_tmp:%lukB"
++ " pages_scanned:%lu"
++ " all_unreclaimable? %s"
++ "\n",
++ zone->name,
++ K(zone_page_state(zone, NR_FREE_PAGES)),
++ K(min_wmark_pages(zone)),
++ K(low_wmark_pages(zone)),
++ K(high_wmark_pages(zone)),
++ K(zone_page_state(zone, NR_ACTIVE_ANON)),
++ K(zone_page_state(zone, NR_INACTIVE_ANON)),
++ K(zone_page_state(zone, NR_ACTIVE_FILE)),
++ K(zone_page_state(zone, NR_INACTIVE_FILE)),
++ K(zone_page_state(zone, NR_UNEVICTABLE)),
++ K(zone_page_state(zone, NR_ISOLATED_ANON)),
++ K(zone_page_state(zone, NR_ISOLATED_FILE)),
++ K(zone->present_pages),
++ K(zone->managed_pages),
++ K(zone_page_state(zone, NR_MLOCK)),
++ K(zone_page_state(zone, NR_FILE_DIRTY)),
++ K(zone_page_state(zone, NR_WRITEBACK)),
++ K(zone_page_state(zone, NR_FILE_MAPPED)),
++ K(zone_page_state(zone, NR_SHMEM)),
++ K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
++ K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
++ zone_page_state(zone, NR_KERNEL_STACK) *
++ THREAD_SIZE / 1024,
++ K(zone_page_state(zone, NR_PAGETABLE)),
++ K(zone_page_state(zone, NR_UNSTABLE_NFS)),
++ K(zone_page_state(zone, NR_BOUNCE)),
++ K(free_pcp),
++ K(this_cpu_read(zone->pageset->pcp.count)),
++ K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
++ K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
++ K(zone_page_state(zone, NR_PAGES_SCANNED)),
++ (!zone_reclaimable(zone) ? "yes" : "no")
++ );
++ printk("lowmem_reserve[]:");
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ printk(" %ld", zone->lowmem_reserve[i]);
++ printk("\n");
++ }
++
++ for_each_populated_zone(zone) {
++ unsigned long nr[MAX_ORDER], flags, order, total = 0;
++ unsigned char types[MAX_ORDER];
++
++ if (skip_free_areas_node(filter, zone_to_nid(zone)))
++ continue;
++ show_node(zone);
++ printk("%s: ", zone->name);
++
++ spin_lock_irqsave(&zone->lock, flags);
++ for (order = 0; order < MAX_ORDER; order++) {
++ struct free_area *area = &zone->free_area[order];
++ int type;
++
++ nr[order] = area->nr_free;
++ total += nr[order] << order;
++
++ types[order] = 0;
++ for (type = 0; type < MIGRATE_TYPES; type++) {
++ if (!list_empty(&area->free_list[type]))
++ types[order] |= 1 << type;
++ }
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++ for (order = 0; order < MAX_ORDER; order++) {
++ printk("%lu*%lukB ", nr[order], K(1UL) << order);
++ if (nr[order])
++ show_migration_types(types[order]);
++ }
++ printk("= %lukB\n", K(total));
++ }
++
++ hugetlb_show_meminfo();
++
++ printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
++
++ show_swap_cache_info();
++}
++
++static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
++{
++ zoneref->zone = zone;
++ zoneref->zone_idx = zone_idx(zone);
++}
++
++/*
++ * Builds allocation fallback zone lists.
++ *
++ * Add all populated zones of a node to the zonelist.
++ */
++static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
++ int nr_zones)
++{
++ struct zone *zone;
++ enum zone_type zone_type = MAX_NR_ZONES;
++
++ do {
++ zone_type--;
++ zone = pgdat->node_zones + zone_type;
++ if (populated_zone(zone)) {
++ zoneref_set_zone(zone,
++ &zonelist->_zonerefs[nr_zones++]);
++ check_highest_zone(zone_type);
++ }
++ } while (zone_type);
++
++ return nr_zones;
++}
++
++
++/*
++ * zonelist_order:
++ * 0 = automatic detection of better ordering.
++ * 1 = order by ([node] distance, -zonetype)
++ * 2 = order by (-zonetype, [node] distance)
++ *
++ * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
++ * the same zonelist. So only NUMA can configure this param.
++ */
++#define ZONELIST_ORDER_DEFAULT 0
++#define ZONELIST_ORDER_NODE 1
++#define ZONELIST_ORDER_ZONE 2
++
++/* zonelist order in the kernel.
++ * set_zonelist_order() will set this to NODE or ZONE.
++ */
++static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
++static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
++
++
++#ifdef CONFIG_NUMA
++/* The value the user specified, possibly changed via boot parameter or sysctl */
++static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
++/* string for sysctl */
++#define NUMA_ZONELIST_ORDER_LEN 16
++char numa_zonelist_order[16] = "default";
++
++/*
++ * interface for configuring zonelist ordering.
++ * command line option "numa_zonelist_order"
++ * = "[dD]efault" - default, automatic configuration.
++ * = "[nN]ode" - order by node locality, then by zone within node
++ * = "[zZ]one" - order by zone, then by locality within zone
++ */
++
++static int __parse_numa_zonelist_order(char *s)
++{
++ if (*s == 'd' || *s == 'D') {
++ user_zonelist_order = ZONELIST_ORDER_DEFAULT;
++ } else if (*s == 'n' || *s == 'N') {
++ user_zonelist_order = ZONELIST_ORDER_NODE;
++ } else if (*s == 'z' || *s == 'Z') {
++ user_zonelist_order = ZONELIST_ORDER_ZONE;
++ } else {
++ printk(KERN_WARNING
++ "Ignoring invalid numa_zonelist_order value: "
++ "%s\n", s);
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static __init int setup_numa_zonelist_order(char *s)
++{
++ int ret;
++
++ if (!s)
++ return 0;
++
++ ret = __parse_numa_zonelist_order(s);
++ if (ret == 0)
++ strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
++
++ return ret;
++}
++early_param("numa_zonelist_order", setup_numa_zonelist_order);
++
++/*
++ * sysctl handler for numa_zonelist_order
++ */
++int numa_zonelist_order_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *length,
++ loff_t *ppos)
++{
++ char saved_string[NUMA_ZONELIST_ORDER_LEN];
++ int ret;
++ static DEFINE_MUTEX(zl_order_mutex);
++
++ mutex_lock(&zl_order_mutex);
++ if (write) {
++ if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
++ ret = -EINVAL;
++ goto out;
++ }
++ strcpy(saved_string, (char *)table->data);
++ }
++ ret = proc_dostring(table, write, buffer, length, ppos);
++ if (ret)
++ goto out;
++ if (write) {
++ int oldval = user_zonelist_order;
++
++ ret = __parse_numa_zonelist_order((char *)table->data);
++ if (ret) {
++ /*
++ * bogus value. restore saved string
++ */
++ strncpy((char *)table->data, saved_string,
++ NUMA_ZONELIST_ORDER_LEN);
++ user_zonelist_order = oldval;
++ } else if (oldval != user_zonelist_order) {
++ mutex_lock(&zonelists_mutex);
++ build_all_zonelists(NULL, NULL);
++ mutex_unlock(&zonelists_mutex);
++ }
++ }
++out:
++ mutex_unlock(&zl_order_mutex);
++ return ret;
++}
++
++
++#define MAX_NODE_LOAD (nr_online_nodes)
++static int node_load[MAX_NUMNODES];
++
++/**
++ * find_next_best_node - find the next node that should appear in a given node's fallback list
++ * @node: node whose fallback list we're appending
++ * @used_node_mask: nodemask_t of already used nodes
++ *
++ * We use a number of factors to determine which is the next node that should
++ * appear on a given node's fallback list. The node should not have appeared
++ * already in @node's fallback list, and it should be the next closest node
++ * according to the distance array (which contains arbitrary distance values
++ * from each node to each node in the system), and should also prefer nodes
++ * with no CPUs, since presumably they'll have very little allocation pressure
++ * on them otherwise.
++ * It returns -1 if no node is found.
++ */
++static int find_next_best_node(int node, nodemask_t *used_node_mask)
++{
++ int n, val;
++ int min_val = INT_MAX;
++ int best_node = NUMA_NO_NODE;
++ const struct cpumask *tmp = cpumask_of_node(0);
++
++ /* Use the local node if we haven't already */
++ if (!node_isset(node, *used_node_mask)) {
++ node_set(node, *used_node_mask);
++ return node;
++ }
++
++ for_each_node_state(n, N_MEMORY) {
++
++ /* Don't want a node to appear more than once */
++ if (node_isset(n, *used_node_mask))
++ continue;
++
++ /* Use the distance array to find the distance */
++ val = node_distance(node, n);
++
++ /* Penalize nodes under us ("prefer the next node") */
++ val += (n < node);
++
++ /* Give preference to headless and unused nodes */
++ tmp = cpumask_of_node(n);
++ if (!cpumask_empty(tmp))
++ val += PENALTY_FOR_NODE_WITH_CPUS;
++
++ /* Slight preference for less loaded node */
++ val *= (MAX_NODE_LOAD*MAX_NUMNODES);
++ val += node_load[n];
++
++ if (val < min_val) {
++ min_val = val;
++ best_node = n;
++ }
++ }
++
++ if (best_node >= 0)
++ node_set(best_node, *used_node_mask);
++
++ return best_node;
++}
++
++
++/*
++ * Build zonelists ordered by node and zones within node.
++ * This results in maximum locality--normal zone overflows into local
++ * DMA zone, if any--but risks exhausting DMA zone.
++ */
++static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
++{
++ int j;
++ struct zonelist *zonelist;
++
++ zonelist = &pgdat->node_zonelists[0];
++ for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
++ ;
++ j = build_zonelists_node(NODE_DATA(node), zonelist, j);
++ zonelist->_zonerefs[j].zone = NULL;
++ zonelist->_zonerefs[j].zone_idx = 0;
++}
++
++/*
++ * Build gfp_thisnode zonelists
++ */
++static void build_thisnode_zonelists(pg_data_t *pgdat)
++{
++ int j;
++ struct zonelist *zonelist;
++
++ zonelist = &pgdat->node_zonelists[1];
++ j = build_zonelists_node(pgdat, zonelist, 0);
++ zonelist->_zonerefs[j].zone = NULL;
++ zonelist->_zonerefs[j].zone_idx = 0;
++}
++
++/*
++ * Build zonelists ordered by zone and nodes within zones.
++ * This results in conserving DMA zone[s] until all Normal memory is
++ * exhausted, but results in overflowing to remote node while memory
++ * may still exist in local DMA zone.
++ */
++static int node_order[MAX_NUMNODES];
++
++static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
++{
++ int pos, j, node;
++ int zone_type; /* needs to be signed */
++ struct zone *z;
++ struct zonelist *zonelist;
++
++ zonelist = &pgdat->node_zonelists[0];
++ pos = 0;
++ for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
++ for (j = 0; j < nr_nodes; j++) {
++ node = node_order[j];
++ z = &NODE_DATA(node)->node_zones[zone_type];
++ if (populated_zone(z)) {
++ zoneref_set_zone(z,
++ &zonelist->_zonerefs[pos++]);
++ check_highest_zone(zone_type);
++ }
++ }
++ }
++ zonelist->_zonerefs[pos].zone = NULL;
++ zonelist->_zonerefs[pos].zone_idx = 0;
++}
++
++#if defined(CONFIG_64BIT)
++/*
++ * Devices that require DMA32/DMA are relatively rare and do not justify a
++ * penalty to every machine in case the specialised case applies. Default
++ * to Node-ordering on 64-bit NUMA machines
++ */
++static int default_zonelist_order(void)
++{
++ return ZONELIST_ORDER_NODE;
++}
++#else
++/*
++ * On 32-bit, the Normal zone needs to be preserved for allocations accessible
++ * by the kernel. If processes running on node 0 deplete the low memory zone
++ * then reclaim will occur more frequently, increasing stalls and potentially
++ * making it easier to hit OOM if a large percentage of the zone is under
++ * writeback or dirty. The problem is significantly worse if CONFIG_HIGHPTE
++ * is not set.
++ * Hence, default to zone ordering on 32-bit.
++ */
++static int default_zonelist_order(void)
++{
++ return ZONELIST_ORDER_ZONE;
++}
++#endif /* CONFIG_64BIT */
++
++static void set_zonelist_order(void)
++{
++ if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
++ current_zonelist_order = default_zonelist_order();
++ else
++ current_zonelist_order = user_zonelist_order;
++}
++
++static void build_zonelists(pg_data_t *pgdat)
++{
++ int j, node, load;
++ enum zone_type i;
++ nodemask_t used_mask;
++ int local_node, prev_node;
++ struct zonelist *zonelist;
++ int order = current_zonelist_order;
++
++ /* initialize zonelists */
++ for (i = 0; i < MAX_ZONELISTS; i++) {
++ zonelist = pgdat->node_zonelists + i;
++ zonelist->_zonerefs[0].zone = NULL;
++ zonelist->_zonerefs[0].zone_idx = 0;
++ }
++
++ /* NUMA-aware ordering of nodes */
++ local_node = pgdat->node_id;
++ load = nr_online_nodes;
++ prev_node = local_node;
++ nodes_clear(used_mask);
++
++ memset(node_order, 0, sizeof(node_order));
++ j = 0;
++
++ while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
++ /*
++ * We don't want to pressure a particular node.
++ * So we add a penalty to the first node in the same
++ * distance group to make it round-robin.
++ */
++ if (node_distance(local_node, node) !=
++ node_distance(local_node, prev_node))
++ node_load[node] = load;
++
++ prev_node = node;
++ load--;
++ if (order == ZONELIST_ORDER_NODE)
++ build_zonelists_in_node_order(pgdat, node);
++ else
++ node_order[j++] = node; /* remember order */
++ }
++
++ if (order == ZONELIST_ORDER_ZONE) {
++ /* calculate node order -- i.e., DMA last! */
++ build_zonelists_in_zone_order(pgdat, j);
++ }
++
++ build_thisnode_zonelists(pgdat);
++}
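++
++/*
++ * Illustrative example (hypothetical two-node box): if node 0 has DMA and
++ * Normal zones and node 1 has only a Normal zone, node ordering builds
++ * node 0's fallback list as Normal(0), DMA(0), Normal(1), while zone
++ * ordering builds it as Normal(0), Normal(1), DMA(0).
++ */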
++
++/* Construct the zonelist performance cache - see further mmzone.h */
++static void build_zonelist_cache(pg_data_t *pgdat)
++{
++ struct zonelist *zonelist;
++ struct zonelist_cache *zlc;
++ struct zoneref *z;
++
++ zonelist = &pgdat->node_zonelists[0];
++ zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
++ bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
++ for (z = zonelist->_zonerefs; z->zone; z++)
++ zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
++}
++
++#ifdef CONFIG_HAVE_MEMORYLESS_NODES
++/*
++ * Return node id of node used for "local" allocations.
++ * I.e., first node id of first zone in arg node's generic zonelist.
++ * Used for initializing percpu 'numa_mem', which is used primarily
++ * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
++ */
++int local_memory_node(int node)
++{
++ struct zone *zone;
++
++ (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
++ gfp_zone(GFP_KERNEL),
++ NULL,
++ &zone);
++ return zone->node;
++}
++#endif
++
++#else /* CONFIG_NUMA */
++
++static void set_zonelist_order(void)
++{
++ current_zonelist_order = ZONELIST_ORDER_ZONE;
++}
++
++static void build_zonelists(pg_data_t *pgdat)
++{
++ int node, local_node;
++ enum zone_type j;
++ struct zonelist *zonelist;
++
++ local_node = pgdat->node_id;
++
++ zonelist = &pgdat->node_zonelists[0];
++ j = build_zonelists_node(pgdat, zonelist, 0);
++
++ /*
++ * Now we build the zonelist so that it contains the zones
++ * of all the other nodes.
++ * We don't want to pressure a particular node, so when
++ * building the zones for node N, we make sure that the
++ * zones coming right after the local ones are those from
++ * node N+1 (modulo N)
++ */
++ for (node = local_node + 1; node < MAX_NUMNODES; node++) {
++ if (!node_online(node))
++ continue;
++ j = build_zonelists_node(NODE_DATA(node), zonelist, j);
++ }
++ for (node = 0; node < local_node; node++) {
++ if (!node_online(node))
++ continue;
++ j = build_zonelists_node(NODE_DATA(node), zonelist, j);
++ }
++
++ zonelist->_zonerefs[j].zone = NULL;
++ zonelist->_zonerefs[j].zone_idx = 0;
++}
++
++/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
++static void build_zonelist_cache(pg_data_t *pgdat)
++{
++ pgdat->node_zonelists[0].zlcache_ptr = NULL;
++}
++
++#endif /* CONFIG_NUMA */
++
++/*
++ * Boot pageset table. One per cpu which is going to be used for all
++ * zones and all nodes. The parameters will be set in such a way
++ * that an item put on a list will immediately be handed over to
++ * the buddy list. This is safe since pageset manipulation is done
++ * with interrupts disabled.
++ *
++ * The boot_pagesets must be kept even after bootup is complete for
++ * unused processors and/or zones. They do play a role for bootstrapping
++ * hotplugged processors.
++ *
++ * zoneinfo_show() and maybe other functions do
++ * not check if the processor is online before following the pageset pointer.
++ * Other parts of the kernel may not check if the zone is available.
++ */
++static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
++static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
++static void setup_zone_pageset(struct zone *zone);
++
++/*
++ * Global mutex to protect against size modification of zonelists
++ * as well as to serialize pageset setup for the new populated zone.
++ */
++DEFINE_MUTEX(zonelists_mutex);
++
++/* The int return value is only there to satisfy stop_machine() */
++static int __build_all_zonelists(void *data)
++{
++ int nid;
++ int cpu;
++ pg_data_t *self = data;
++
++#ifdef CONFIG_NUMA
++ memset(node_load, 0, sizeof(node_load));
++#endif
++
++ if (self && !node_online(self->node_id)) {
++ build_zonelists(self);
++ build_zonelist_cache(self);
++ }
++
++ for_each_online_node(nid) {
++ pg_data_t *pgdat = NODE_DATA(nid);
++
++ build_zonelists(pgdat);
++ build_zonelist_cache(pgdat);
++ }
++
++ /*
++ * Initialize the boot_pagesets that are going to be used
++ * for bootstrapping processors. The real pagesets for
++ * each zone will be allocated later when the per cpu
++ * allocator is available.
++ *
++ * boot_pagesets are used also for bootstrapping offline
++ * cpus if the system is already booted because the pagesets
++ * are needed to initialize allocators on a specific cpu too.
++ * F.e. the percpu allocator needs the page allocator which
++ * needs the percpu allocator in order to allocate its pagesets
++ * (a chicken-egg dilemma).
++ */
++ for_each_possible_cpu(cpu) {
++ setup_pageset(&per_cpu(boot_pageset, cpu), 0);
++
++#ifdef CONFIG_HAVE_MEMORYLESS_NODES
++ /*
++ * We now know the "local memory node" for each node--
++ * i.e., the node of the first zone in the generic zonelist.
++ * Set up numa_mem percpu variable for on-line cpus. During
++ * boot, only the boot cpu should be on-line; we'll init the
++ * secondary cpus' numa_mem as they come on-line. During
++ * node/memory hotplug, we'll fixup all on-line cpus.
++ */
++ if (cpu_online(cpu))
++ set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
++#endif
++ }
++
++ return 0;
++}
++
++static noinline void __init
++build_all_zonelists_init(void)
++{
++ __build_all_zonelists(NULL);
++ mminit_verify_zonelist();
++ cpuset_init_current_mems_allowed();
++}
++
++/*
++ * Called with zonelists_mutex held always
++ * unless system_state == SYSTEM_BOOTING.
++ *
++ * __ref due to (1) call of __meminit annotated setup_zone_pageset
++ * [we're only called with non-NULL zone through __meminit paths] and
++ * (2) call of __init annotated helper build_all_zonelists_init
++ * [protected by SYSTEM_BOOTING].
++ */
++void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
++{
++ set_zonelist_order();
++
++ if (system_state == SYSTEM_BOOTING) {
++ build_all_zonelists_init();
++ } else {
++#ifdef CONFIG_MEMORY_HOTPLUG
++ if (zone)
++ setup_zone_pageset(zone);
++#endif
++ /* we have to stop all cpus to guarantee there is no user
++ of zonelist */
++ stop_machine(__build_all_zonelists, pgdat, NULL);
++ /* cpuset refresh routine should be here */
++ }
++ vm_total_pages = nr_free_pagecache_pages();
++ /*
++ * Disable grouping by mobility if the number of pages in the
++ * system is too low to allow the mechanism to work. It would be
++ * more accurate, but expensive to check per-zone. This check is
++ * made on memory-hotadd so a system can start with mobility
++ * disabled and enable it later
++ */
++ if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
++ page_group_by_mobility_disabled = 1;
++ else
++ page_group_by_mobility_disabled = 0;
++
++ pr_info("Built %i zonelists in %s order, mobility grouping %s. "
++ "Total pages: %ld\n",
++ nr_online_nodes,
++ zonelist_order_name[current_zonelist_order],
++ page_group_by_mobility_disabled ? "off" : "on",
++ vm_total_pages);
++#ifdef CONFIG_NUMA
++ pr_info("Policy zone: %s\n", zone_names[policy_zone]);
++#endif
++}
++
++/*
++ * Helper functions to size the waitqueue hash table.
++ * Essentially these want to choose hash table sizes sufficiently
++ * large so that collisions trying to wait on pages are rare.
++ * But in fact, the number of active page waitqueues on typical
++ * systems is ridiculously low, less than 200. So this is even
++ * conservative, even though it seems large.
++ *
++ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
++ * waitqueues, i.e. the size of the waitq table given the number of pages.
++ */
++#define PAGES_PER_WAITQUEUE 256
++
++#ifndef CONFIG_MEMORY_HOTPLUG
++static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
++{
++ unsigned long size = 1;
++
++ pages /= PAGES_PER_WAITQUEUE;
++
++ while (size < pages)
++ size <<= 1;
++
++ /*
++ * Once we have dozens or even hundreds of threads sleeping
++ * on IO we've got bigger problems than wait queue collision.
++ * Limit the size of the wait table to a reasonable size.
++ */
++ size = min(size, 4096UL);
++
++ return max(size, 4UL);
++}
++#else
++/*
++ * A zone's size might be changed by hot-add, so it is not possible to determine
++ * a suitable size for its wait_table. So we use the maximum size now.
++ *
++ * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
++ *
++ * i386 (preemption config) : 4096 x 16 = 64Kbyte.
++ * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
++ * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
++ *
++ * The maximum entries are prepared when a zone's memory is (512K + 256) pages
++ * or more by the traditional way. (See above). It equals:
++ *
++ * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
++ * ia64(16K page size) : = ( 8G + 4M)byte.
++ * powerpc (64K page size) : = (32G +16M)byte.
++ */
++static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
++{
++ return 4096UL;
++}
++#endif
++
++/*
++ * This is an integer logarithm so that shifts can be used later
++ * to extract the more random high bits from the multiplicative
++ * hash function before the remainder is taken.
++ */
++static inline unsigned long wait_table_bits(unsigned long size)
++{
++ return ffz(~size);
++}
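++
++/*
++ * Illustrative example (without CONFIG_MEMORY_HOTPLUG): a 1 GiB zone with
++ * 4 KiB pages spans 262144 pages, so wait_table_hash_nr_entries() picks
++ * 262144 / 256 = 1024 entries (already a power of two) and
++ * wait_table_bits(1024) returns 10.  A 64 GiB zone would want 65536
++ * entries but is clamped to the 4096 maximum.
++ */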
++
++/*
++ * Check if a pageblock contains reserved pages
++ */
++static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long pfn;
++
++ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
++ if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * Mark a number of pageblocks as MIGRATE_RESERVE. The number
++ * of blocks reserved is based on min_wmark_pages(zone). The memory within
++ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
++ * higher will lead to a bigger reserve which will get freed as contiguous
++ * blocks as reclaim kicks in
++ */
++static void setup_zone_migrate_reserve(struct zone *zone)
++{
++ unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
++ struct page *page;
++ unsigned long block_migratetype;
++ int reserve;
++ int old_reserve;
++
++ /*
++ * Get the start pfn, end pfn and the number of blocks to reserve
++ * We have to be careful to be aligned to pageblock_nr_pages to
++ * make sure that we always check pfn_valid for the first page in
++ * the block.
++ */
++ start_pfn = zone->zone_start_pfn;
++ end_pfn = zone_end_pfn(zone);
++ start_pfn = roundup(start_pfn, pageblock_nr_pages);
++ reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
++ pageblock_order;
++
++ /*
++ * Reserve blocks are generally in place to help high-order atomic
++ * allocations that are short-lived. A min_free_kbytes value that
++ * would result in more than 2 reserve blocks for atomic allocations
++ * is assumed to be in place to help anti-fragmentation for the
++ * future allocation of hugepages at runtime.
++ */
++ reserve = min(2, reserve);
++ old_reserve = zone->nr_migrate_reserve_block;
++
++ /* On memory hot-add, we almost always need to do nothing */
++ if (reserve == old_reserve)
++ return;
++ zone->nr_migrate_reserve_block = reserve;
++
++ for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
++ if (!pfn_valid(pfn))
++ continue;
++ page = pfn_to_page(pfn);
++
++ /* Watch out for overlapping nodes */
++ if (page_to_nid(page) != zone_to_nid(zone))
++ continue;
++
++ block_migratetype = get_pageblock_migratetype(page);
++
++ /* Only test what is necessary when the reserves are not met */
++ if (reserve > 0) {
++ /*
++ * Blocks with reserved pages will never free, skip
++ * them.
++ */
++ block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
++ if (pageblock_is_reserved(pfn, block_end_pfn))
++ continue;
++
++ /* If this block is reserved, account for it */
++ if (block_migratetype == MIGRATE_RESERVE) {
++ reserve--;
++ continue;
++ }
++
++ /* Suitable for reserving if this block is movable */
++ if (block_migratetype == MIGRATE_MOVABLE) {
++ set_pageblock_migratetype(page,
++ MIGRATE_RESERVE);
++ move_freepages_block(zone, page,
++ MIGRATE_RESERVE);
++ reserve--;
++ continue;
++ }
++ } else if (!old_reserve) {
++ /*
++ * At boot time we don't need to scan the whole zone
++ * for turning off MIGRATE_RESERVE.
++ */
++ break;
++ }
++
++ /*
++ * If the reserve is met and this is a previous reserved block,
++ * take it back
++ */
++ if (block_migratetype == MIGRATE_RESERVE) {
++ set_pageblock_migratetype(page, MIGRATE_MOVABLE);
++ move_freepages_block(zone, page, MIGRATE_MOVABLE);
++ }
++ }
++}
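++
++/*
++ * Illustrative example (assumed values): with pageblock_nr_pages = 512
++ * and min_wmark_pages(zone) = 1024, roundup(1024, 512) >> pageblock_order
++ * is 2, so at most two pageblocks per zone are marked MIGRATE_RESERVE.
++ */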
++
++/*
++ * Initially all pages are reserved - free ones are freed
++ * up by free_all_bootmem() once the early boot process is
++ * done. Non-atomic initialization, single-pass.
++ */
++void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
++ unsigned long start_pfn, enum memmap_context context)
++{
++ struct page *page;
++ unsigned long end_pfn = start_pfn + size;
++ unsigned long pfn;
++ struct zone *z;
++
++ if (highest_memmap_pfn < end_pfn - 1)
++ highest_memmap_pfn = end_pfn - 1;
++
++ z = &NODE_DATA(nid)->node_zones[zone];
++ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
++ /*
++ * There can be holes in boot-time mem_map[]s
++ * handed to this function. They do not
++ * exist on hotplugged memory.
++ */
++ if (context == MEMMAP_EARLY) {
++ if (!early_pfn_valid(pfn))
++ continue;
++ if (!early_pfn_in_nid(pfn, nid))
++ continue;
++ }
++ page = pfn_to_page(pfn);
++ set_page_links(page, zone, nid, pfn);
++ mminit_verify_page_links(page, zone, nid, pfn);
++ init_page_count(page);
++ page_mapcount_reset(page);
++ page_cpupid_reset_last(page);
++ SetPageReserved(page);
++ /*
++ * Mark the block movable so that blocks are reserved for
++ * movable at startup. This will force kernel allocations
++ * to reserve their blocks rather than leaking throughout
++ * the address space during boot when many long-lived
++ * kernel allocations are made. Later some blocks near
++ * the start are marked MIGRATE_RESERVE by
++ * setup_zone_migrate_reserve()
++ *
++ * The bitmap is created for the zone's valid pfn range, but the memmap
++ * can be created for invalid pages (for alignment).  Check here so that
++ * set_pageblock_migratetype() is not called against a pfn outside the
++ * zone.
++ */
++ if ((z->zone_start_pfn <= pfn)
++ && (pfn < zone_end_pfn(z))
++ && !(pfn & (pageblock_nr_pages - 1)))
++ set_pageblock_migratetype(page, MIGRATE_MOVABLE);
++
++ INIT_LIST_HEAD(&page->lru);
++#ifdef WANT_PAGE_VIRTUAL
++ /* The shift won't overflow because ZONE_NORMAL is below 4G. */
++ if (!is_highmem_idx(zone))
++ set_page_address(page, __va(pfn << PAGE_SHIFT));
++#endif
++ }
++}
++
++static void __meminit zone_init_free_lists(struct zone *zone)
++{
++ unsigned int order, t;
++ for_each_migratetype_order(order, t) {
++ INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
++ zone->free_area[order].nr_free = 0;
++ }
++}
++
++#ifndef __HAVE_ARCH_MEMMAP_INIT
++#define memmap_init(size, nid, zone, start_pfn) \
++ memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
++#endif
++
++static int zone_batchsize(struct zone *zone)
++{
++#ifdef CONFIG_MMU
++ int batch;
++
++ /*
++ * The per-cpu-pages pools are set to around 1/1000th of the
++ * size of the zone, but no more than 512 KiB.
++ *
++ * OK, so we don't know how big the cache is. So guess.
++ */
++ batch = zone->managed_pages / 1024;
++ if (batch * PAGE_SIZE > 512 * 1024)
++ batch = (512 * 1024) / PAGE_SIZE;
++ batch /= 4; /* We effectively *= 4 below */
++ if (batch < 1)
++ batch = 1;
++
++ /*
++ * Clamp the batch to a 2^n - 1 value. Having a power
++ * of 2 value was found to be more likely to have
++ * suboptimal cache aliasing properties in some cases.
++ *
++ * For example if 2 tasks are alternately allocating
++ * batches of pages, one task can end up with a lot
++ * of pages of one half of the possible page colors
++ * and the other with pages of the other colors.
++ */
++ batch = rounddown_pow_of_two(batch + batch/2) - 1;
++
++ return batch;
++
++#else
++ /* The deferral and batching of frees should be suppressed under NOMMU
++ * conditions.
++ *
++ * The problem is that NOMMU needs to be able to allocate large chunks
++ * of contiguous memory as there's no hardware page translation to
++ * assemble apparent contiguous memory from discontiguous pages.
++ *
++ * Queueing large contiguous runs of pages for batching, however,
++ * causes the pages to actually be freed in smaller chunks. As there
++ * can be a significant delay between the individual batches being
++ * recycled, this leads to the once large chunks of space being
++ * fragmented and becoming unavailable for high-order allocations.
++ */
++ return 0;
++#endif
++}
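++
++/*
++ * Illustrative example (CONFIG_MMU, 4 KiB pages, assumed zone size): a
++ * zone with 1 GiB of managed memory has 262144 pages, so batch starts at
++ * 256, is capped to 512 KiB / 4 KiB = 128, divided by 4 to give 32, and
++ * finally rounded to 2^n - 1, yielding a batch of 31.
++ */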
++
++/*
++ * pcp->high and pcp->batch values are related and dependent on one another:
++ * ->batch must never be higher than ->high.
++ * The following function updates them in a safe manner without read side
++ * locking.
++ *
++ * Any new users of pcp->batch and pcp->high should ensure they can cope with
++ * those fields changing asynchronously (according to the above rule).
++ *
++ * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
++ * outside of boot time (or some other assurance that no concurrent updaters
++ * exist).
++ */
++static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
++ unsigned long batch)
++{
++ /* start with a fail safe value for batch */
++ pcp->batch = 1;
++ smp_wmb();
++
++ /* Update high, then batch, in order */
++ pcp->high = high;
++ smp_wmb();
++
++ pcp->batch = batch;
++}
++
++/* a companion to pageset_set_high() */
++static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
++{
++ pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
++}
++
++static void pageset_init(struct per_cpu_pageset *p)
++{
++ struct per_cpu_pages *pcp;
++ int migratetype;
++
++ memset(p, 0, sizeof(*p));
++
++ pcp = &p->pcp;
++ pcp->count = 0;
++ for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
++ INIT_LIST_HEAD(&pcp->lists[migratetype]);
++}
++
++static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
++{
++ pageset_init(p);
++ pageset_set_batch(p, batch);
++}
++
++/*
++ * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
++ * to the value high for the pageset p.
++ */
++static void pageset_set_high(struct per_cpu_pageset *p,
++ unsigned long high)
++{
++ unsigned long batch = max(1UL, high / 4);
++ if ((high / 4) > (PAGE_SHIFT * 8))
++ batch = PAGE_SHIFT * 8;
++
++ pageset_update(&p->pcp, high, batch);
++}
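++
++/*
++ * Illustrative example: with PAGE_SHIFT = 12, a requested high of 1000
++ * pages gives high / 4 = 250, which exceeds PAGE_SHIFT * 8 = 96, so the
++ * batch is capped at 96.
++ */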
++
++static void pageset_set_high_and_batch(struct zone *zone,
++ struct per_cpu_pageset *pcp)
++{
++ if (percpu_pagelist_fraction)
++ pageset_set_high(pcp,
++ (zone->managed_pages /
++ percpu_pagelist_fraction));
++ else
++ pageset_set_batch(pcp, zone_batchsize(zone));
++}
++
++static void __meminit zone_pageset_init(struct zone *zone, int cpu)
++{
++ struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
++
++ pageset_init(pcp);
++ pageset_set_high_and_batch(zone, pcp);
++}
++
++static void __meminit setup_zone_pageset(struct zone *zone)
++{
++ int cpu;
++ zone->pageset = alloc_percpu(struct per_cpu_pageset);
++ for_each_possible_cpu(cpu)
++ zone_pageset_init(zone, cpu);
++}
++
++/*
++ * Allocate per cpu pagesets and initialize them.
++ * Before this call only boot pagesets were available.
++ */
++void __init setup_per_cpu_pageset(void)
++{
++ struct zone *zone;
++
++ for_each_populated_zone(zone)
++ setup_zone_pageset(zone);
++}
++
++static noinline __init_refok
++int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
++{
++ int i;
++ size_t alloc_size;
++
++ /*
++ * The per-page waitqueue mechanism uses hashed waitqueues
++ * per zone.
++ */
++ zone->wait_table_hash_nr_entries =
++ wait_table_hash_nr_entries(zone_size_pages);
++ zone->wait_table_bits =
++ wait_table_bits(zone->wait_table_hash_nr_entries);
++ alloc_size = zone->wait_table_hash_nr_entries
++ * sizeof(wait_queue_head_t);
++
++ if (!slab_is_available()) {
++ zone->wait_table = (wait_queue_head_t *)
++ memblock_virt_alloc_node_nopanic(
++ alloc_size, zone->zone_pgdat->node_id);
++ } else {
++ /*
++ * This case means that a zone whose size was 0 gets new memory
++ * via memory hot-add.
++ * But it may be the case that a new node was hot-added. In
++ * this case vmalloc() will not be able to use this new node's
++ * memory - this wait_table must be initialized to use this new
++ * node itself as well.
++ * To use this new node's memory, further consideration will be
++ * necessary.
++ */
++ zone->wait_table = vmalloc(alloc_size);
++ }
++ if (!zone->wait_table)
++ return -ENOMEM;
++
++ for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
++ init_waitqueue_head(zone->wait_table + i);
++
++ return 0;
++}
++
++static __meminit void zone_pcp_init(struct zone *zone)
++{
++ /*
++ * per cpu subsystem is not up at this point. The following code
++ * relies on the ability of the linker to provide the
++ * offset of a (static) per cpu variable into the per cpu area.
++ */
++ zone->pageset = &boot_pageset;
++
++ if (populated_zone(zone))
++ printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
++ zone->name, zone->present_pages,
++ zone_batchsize(zone));
++}
++
++int __meminit init_currently_empty_zone(struct zone *zone,
++ unsigned long zone_start_pfn,
++ unsigned long size,
++ enum memmap_context context)
++{
++ struct pglist_data *pgdat = zone->zone_pgdat;
++ int ret;
++ ret = zone_wait_table_init(zone, size);
++ if (ret)
++ return ret;
++ pgdat->nr_zones = zone_idx(zone) + 1;
++
++ zone->zone_start_pfn = zone_start_pfn;
++
++ mminit_dprintk(MMINIT_TRACE, "memmap_init",
++ "Initialising map node %d zone %lu pfns %lu -> %lu\n",
++ pgdat->node_id,
++ (unsigned long)zone_idx(zone),
++ zone_start_pfn, (zone_start_pfn + size));
++
++ zone_init_free_lists(zone);
++
++ return 0;
++}
++
++#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
++#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
++/*
++ * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
++ */
++int __meminit __early_pfn_to_nid(unsigned long pfn)
++{
++ unsigned long start_pfn, end_pfn;
++ int nid;
++ /*
++ * NOTE: The following SMP-unsafe globals are only used early in boot
++ * when the kernel is running single-threaded.
++ */
++ static unsigned long __meminitdata last_start_pfn, last_end_pfn;
++ static int __meminitdata last_nid;
++
++ if (last_start_pfn <= pfn && pfn < last_end_pfn)
++ return last_nid;
++
++ nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
++ if (nid != -1) {
++ last_start_pfn = start_pfn;
++ last_end_pfn = end_pfn;
++ last_nid = nid;
++ }
++
++ return nid;
++}
++#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
++
++int __meminit early_pfn_to_nid(unsigned long pfn)
++{
++ int nid;
++
++ nid = __early_pfn_to_nid(pfn);
++ if (nid >= 0)
++ return nid;
++ /* just returns 0 */
++ return 0;
++}
++
++#ifdef CONFIG_NODES_SPAN_OTHER_NODES
++bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
++{
++ int nid;
++
++ nid = __early_pfn_to_nid(pfn);
++ if (nid >= 0 && nid != node)
++ return false;
++ return true;
++}
++#endif
++
++/**
++ * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
++ * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
++ * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
++ *
++ * If an architecture guarantees that all ranges registered contain no holes
++ * and may be freed, this function may be used instead of calling
++ * memblock_free_early_nid() manually.
++ */
++void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
++{
++ unsigned long start_pfn, end_pfn;
++ int i, this_nid;
++
++ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
++ start_pfn = min(start_pfn, max_low_pfn);
++ end_pfn = min(end_pfn, max_low_pfn);
++
++ if (start_pfn < end_pfn)
++ memblock_free_early_nid(PFN_PHYS(start_pfn),
++ (end_pfn - start_pfn) << PAGE_SHIFT,
++ this_nid);
++ }
++}
++
++/**
++ * sparse_memory_present_with_active_regions - Call memory_present for each active range
++ * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
++ *
++ * If an architecture guarantees that all ranges registered contain no holes and may
++ * be freed, this function may be used instead of calling memory_present() manually.
++ */
++void __init sparse_memory_present_with_active_regions(int nid)
++{
++ unsigned long start_pfn, end_pfn;
++ int i, this_nid;
++
++ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
++ memory_present(this_nid, start_pfn, end_pfn);
++}
++
++/**
++ * get_pfn_range_for_nid - Return the start and end page frames for a node
++ * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
++ * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
++ * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
++ *
++ * It returns the start and end page frame of a node based on information
++ * provided by memblock_set_node(). If called for a node
++ * with no available memory, a warning is printed and the start and end
++ * PFNs will be 0.
++ */
++void __meminit get_pfn_range_for_nid(unsigned int nid,
++ unsigned long *start_pfn, unsigned long *end_pfn)
++{
++ unsigned long this_start_pfn, this_end_pfn;
++ int i;
++
++ *start_pfn = -1UL;
++ *end_pfn = 0;
++
++ for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
++ *start_pfn = min(*start_pfn, this_start_pfn);
++ *end_pfn = max(*end_pfn, this_end_pfn);
++ }
++
++ if (*start_pfn == -1UL)
++ *start_pfn = 0;
++}
++
++/*
++ * This finds a zone that can be used for ZONE_MOVABLE pages. The
++ * assumption is made that zones within a node are ordered in monotonically
++ * increasing memory addresses so that the "highest" populated zone is used.
++ */
++static void __init find_usable_zone_for_movable(void)
++{
++ int zone_index;
++ for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
++ if (zone_index == ZONE_MOVABLE)
++ continue;
++
++ if (arch_zone_highest_possible_pfn[zone_index] >
++ arch_zone_lowest_possible_pfn[zone_index])
++ break;
++ }
++
++ VM_BUG_ON(zone_index == -1);
++ movable_zone = zone_index;
++}
++
++/*
++ * The zone ranges provided by the architecture do not include ZONE_MOVABLE
++ * because it is sized independent of architecture. Unlike the other zones,
++ * the starting point for ZONE_MOVABLE is not fixed. It may be different
++ * in each node depending on the size of each node and how evenly kernelcore
++ * is distributed. This helper function adjusts the zone ranges
++ * provided by the architecture for a given node by using the end of the
++ * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
++ * zones within a node are in order of monotonically increasing memory addresses.
++ */
++static void __meminit adjust_zone_range_for_zone_movable(int nid,
++ unsigned long zone_type,
++ unsigned long node_start_pfn,
++ unsigned long node_end_pfn,
++ unsigned long *zone_start_pfn,
++ unsigned long *zone_end_pfn)
++{
++ /* Only adjust if ZONE_MOVABLE is on this node */
++ if (zone_movable_pfn[nid]) {
++ /* Size ZONE_MOVABLE */
++ if (zone_type == ZONE_MOVABLE) {
++ *zone_start_pfn = zone_movable_pfn[nid];
++ *zone_end_pfn = min(node_end_pfn,
++ arch_zone_highest_possible_pfn[movable_zone]);
++
++ /* Adjust for ZONE_MOVABLE starting within this range */
++ } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
++ *zone_end_pfn > zone_movable_pfn[nid]) {
++ *zone_end_pfn = zone_movable_pfn[nid];
++
++ /* Check if this whole range is within ZONE_MOVABLE */
++ } else if (*zone_start_pfn >= zone_movable_pfn[nid])
++ *zone_start_pfn = *zone_end_pfn;
++ }
++}
++
++/*
++ * Return the number of pages a zone spans in a node, including holes
++ * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
++ */
++static unsigned long __meminit zone_spanned_pages_in_node(int nid,
++ unsigned long zone_type,
++ unsigned long node_start_pfn,
++ unsigned long node_end_pfn,
++ unsigned long *ignored)
++{
++ unsigned long zone_start_pfn, zone_end_pfn;
++
++ /* Get the start and end of the zone */
++ zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
++ zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
++ adjust_zone_range_for_zone_movable(nid, zone_type,
++ node_start_pfn, node_end_pfn,
++ &zone_start_pfn, &zone_end_pfn);
++
++ /* Check that this node has pages within the zone's required range */
++ if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
++ return 0;
++
++ /* Move the zone boundaries inside the node if necessary */
++ zone_end_pfn = min(zone_end_pfn, node_end_pfn);
++ zone_start_pfn = max(zone_start_pfn, node_start_pfn);
++
++ /* Return the spanned pages */
++ return zone_end_pfn - zone_start_pfn;
++}
++
++/*
++ * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
++ * then all holes in the requested range will be accounted for.
++ */
++unsigned long __meminit __absent_pages_in_range(int nid,
++ unsigned long range_start_pfn,
++ unsigned long range_end_pfn)
++{
++ unsigned long nr_absent = range_end_pfn - range_start_pfn;
++ unsigned long start_pfn, end_pfn;
++ int i;
++
++ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
++ start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
++ end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
++ nr_absent -= end_pfn - start_pfn;
++ }
++ return nr_absent;
++}
++
++/**
++ * absent_pages_in_range - Return number of page frames in holes within a range
++ * @start_pfn: The start PFN to start searching for holes
++ * @end_pfn: The end PFN to stop searching for holes
++ *
++ * It returns the number of page frames in memory holes within a range.
++ */
++unsigned long __init absent_pages_in_range(unsigned long start_pfn,
++ unsigned long end_pfn)
++{
++ return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
++}
++
++/* Return the number of page frames in holes in a zone on a node */
++static unsigned long __meminit zone_absent_pages_in_node(int nid,
++ unsigned long zone_type,
++ unsigned long node_start_pfn,
++ unsigned long node_end_pfn,
++ unsigned long *ignored)
++{
++ unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
++ unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
++ unsigned long zone_start_pfn, zone_end_pfn;
++
++ zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
++ zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
++
++ adjust_zone_range_for_zone_movable(nid, zone_type,
++ node_start_pfn, node_end_pfn,
++ &zone_start_pfn, &zone_end_pfn);
++ return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
++}
++
++#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
++static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
++ unsigned long zone_type,
++ unsigned long node_start_pfn,
++ unsigned long node_end_pfn,
++ unsigned long *zones_size)
++{
++ return zones_size[zone_type];
++}
++
++static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
++ unsigned long zone_type,
++ unsigned long node_start_pfn,
++ unsigned long node_end_pfn,
++ unsigned long *zholes_size)
++{
++ if (!zholes_size)
++ return 0;
++
++ return zholes_size[zone_type];
++}
++
++#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
++
++static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
++ unsigned long node_start_pfn,
++ unsigned long node_end_pfn,
++ unsigned long *zones_size,
++ unsigned long *zholes_size)
++{
++ unsigned long realtotalpages, totalpages = 0;
++ enum zone_type i;
++
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
++ node_start_pfn,
++ node_end_pfn,
++ zones_size);
++ pgdat->node_spanned_pages = totalpages;
++
++ realtotalpages = totalpages;
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ realtotalpages -=
++ zone_absent_pages_in_node(pgdat->node_id, i,
++ node_start_pfn, node_end_pfn,
++ zholes_size);
++ pgdat->node_present_pages = realtotalpages;
++ printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
++ realtotalpages);
++}
++
++#ifndef CONFIG_SPARSEMEM
++/*
++ * Calculate the size of the zone->blockflags rounded to an unsigned long.
++ * Start by making sure zonesize is a multiple of pageblock_order by rounding
++ * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
++ * round what is now in bits to nearest long in bits, then return it in
++ * bytes.
++ */
++static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
++{
++ unsigned long usemapsize;
++
++ zonesize += zone_start_pfn & (pageblock_nr_pages-1);
++ usemapsize = roundup(zonesize, pageblock_nr_pages);
++ usemapsize = usemapsize >> pageblock_order;
++ usemapsize *= NR_PAGEBLOCK_BITS;
++ usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
++
++ return usemapsize / 8;
++}
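++
++/*
++ * Illustrative example: assuming pageblock_order = 9 (512-page blocks)
++ * and NR_PAGEBLOCK_BITS = 4, an aligned 1 GiB zone of 262144 4 KiB pages
++ * holds 512 pageblocks, needing 2048 bits, i.e. 256 bytes of pageblock
++ * flags.
++ */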
++
++static void __init setup_usemap(struct pglist_data *pgdat,
++ struct zone *zone,
++ unsigned long zone_start_pfn,
++ unsigned long zonesize)
++{
++ unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
++ zone->pageblock_flags = NULL;
++ if (usemapsize)
++ zone->pageblock_flags =
++ memblock_virt_alloc_node_nopanic(usemapsize,
++ pgdat->node_id);
++}
++#else
++static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
++ unsigned long zone_start_pfn, unsigned long zonesize) {}
++#endif /* CONFIG_SPARSEMEM */
++
++#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
++
++/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
++void __paginginit set_pageblock_order(void)
++{
++ unsigned int order;
++
++ /* Check that pageblock_nr_pages has not already been setup */
++ if (pageblock_order)
++ return;
++
++ if (HPAGE_SHIFT > PAGE_SHIFT)
++ order = HUGETLB_PAGE_ORDER;
++ else
++ order = MAX_ORDER - 1;
++
++ /*
++ * Assume the largest contiguous order of interest is a huge page.
++ * This value may be variable depending on boot parameters on IA64 and
++ * powerpc.
++ */
++ pageblock_order = order;
++}
++#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
++
++/*
++ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
++ * is unused as pageblock_order is set at compile-time. See
++ * include/linux/pageblock-flags.h for the values of pageblock_order based on
++ * the kernel config
++ */
++void __paginginit set_pageblock_order(void)
++{
++}
++
++#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
++
++static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
++ unsigned long present_pages)
++{
++ unsigned long pages = spanned_pages;
++
++ /*
++ * Provide a more accurate estimation if there are holes within
++ * the zone and SPARSEMEM is in use. If there are holes within the
++ * zone, each populated memory region may cost us one or two extra
++ * memmap pages due to alignment because memmap pages for each
++ * populated region may not be naturally aligned on a page boundary.
++ * So the (present_pages >> 4) heuristic is a tradeoff for that.
++ */
++ if (spanned_pages > present_pages + (present_pages >> 4) &&
++ IS_ENABLED(CONFIG_SPARSEMEM))
++ pages = present_pages;
++
++ return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
++}
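++
++/*
++ * Illustrative example: assuming a 64-byte struct page and 4 KiB pages,
++ * a zone spanning 262144 pages needs 262144 * 64 bytes = 16 MiB of
++ * memmap, i.e. 4096 pages.
++ */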
++
++/*
++ * Set up the zone data structures:
++ * - mark all pages reserved
++ * - mark all memory queues empty
++ * - clear the memory bitmaps
++ *
++ * NOTE: pgdat should get zeroed by caller.
++ */
++static void __paginginit free_area_init_core(struct pglist_data *pgdat,
++ unsigned long node_start_pfn, unsigned long node_end_pfn,
++ unsigned long *zones_size, unsigned long *zholes_size)
++{
++ enum zone_type j;
++ int nid = pgdat->node_id;
++ unsigned long zone_start_pfn = pgdat->node_start_pfn;
++ int ret;
++
++ pgdat_resize_init(pgdat);
++#ifdef CONFIG_NUMA_BALANCING
++ spin_lock_init(&pgdat->numabalancing_migrate_lock);
++ pgdat->numabalancing_migrate_nr_pages = 0;
++ pgdat->numabalancing_migrate_next_window = jiffies;
++#endif
++ init_waitqueue_head(&pgdat->kswapd_wait);
++ init_waitqueue_head(&pgdat->pfmemalloc_wait);
++ pgdat_page_ext_init(pgdat);
++
++ for (j = 0; j < MAX_NR_ZONES; j++) {
++ struct zone *zone = pgdat->node_zones + j;
++ unsigned long size, realsize, freesize, memmap_pages;
++
++ size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
++ node_end_pfn, zones_size);
++ realsize = freesize = size - zone_absent_pages_in_node(nid, j,
++ node_start_pfn,
++ node_end_pfn,
++ zholes_size);
++
++ /*
++ * Adjust freesize so that it accounts for how much memory
++ * is used by this zone for memmap. This affects the watermark
++ * and per-cpu initialisations
++ */
++ memmap_pages = calc_memmap_size(size, realsize);
++ if (!is_highmem_idx(j)) {
++ if (freesize >= memmap_pages) {
++ freesize -= memmap_pages;
++ if (memmap_pages)
++ printk(KERN_DEBUG
++ " %s zone: %lu pages used for memmap\n",
++ zone_names[j], memmap_pages);
++ } else
++ printk(KERN_WARNING
++ " %s zone: %lu pages exceeds freesize %lu\n",
++ zone_names[j], memmap_pages, freesize);
++ }
++
++ /* Account for reserved pages */
++ if (j == 0 && freesize > dma_reserve) {
++ freesize -= dma_reserve;
++ printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
++ zone_names[0], dma_reserve);
++ }
++
++ if (!is_highmem_idx(j))
++ nr_kernel_pages += freesize;
++ /* Charge for highmem memmap if there are enough kernel pages */
++ else if (nr_kernel_pages > memmap_pages * 2)
++ nr_kernel_pages -= memmap_pages;
++ nr_all_pages += freesize;
++
++ zone->spanned_pages = size;
++ zone->present_pages = realsize;
++ /*
++ * Set an approximate value for lowmem here, it will be adjusted
++ * when the bootmem allocator frees pages into the buddy system.
++ * And all highmem pages will be managed by the buddy system.
++ */
++ zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
++#ifdef CONFIG_NUMA
++ zone->node = nid;
++ zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
++ / 100;
++ zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
++#endif
++ zone->name = zone_names[j];
++ spin_lock_init(&zone->lock);
++ spin_lock_init(&zone->lru_lock);
++ zone_seqlock_init(zone);
++ zone->zone_pgdat = pgdat;
++ zone_pcp_init(zone);
++
++ /* For bootup, initialized properly in watermark setup */
++ mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
++
++ lruvec_init(&zone->lruvec);
++ if (!size)
++ continue;
++
++ set_pageblock_order();
++ setup_usemap(pgdat, zone, zone_start_pfn, size);
++ ret = init_currently_empty_zone(zone, zone_start_pfn,
++ size, MEMMAP_EARLY);
++ BUG_ON(ret);
++ memmap_init(size, nid, j, zone_start_pfn);
++ zone_start_pfn += size;
++ }
++}
++
++static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
++{
++ /* Skip empty nodes */
++ if (!pgdat->node_spanned_pages)
++ return;
++
++#ifdef CONFIG_FLAT_NODE_MEM_MAP
++ /* ia64 gets its own node_mem_map, before this, without bootmem */
++ if (!pgdat->node_mem_map) {
++ unsigned long size, start, end;
++ struct page *map;
++
++ /*
++ * The zone's endpoints aren't required to be MAX_ORDER
++ * aligned, but the node_mem_map endpoints must be, in order
++ * for the buddy allocator to function correctly.
++ */
++ start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
++ end = pgdat_end_pfn(pgdat);
++ end = ALIGN(end, MAX_ORDER_NR_PAGES);
++ size = (end - start) * sizeof(struct page);
++ map = alloc_remap(pgdat->node_id, size);
++ if (!map)
++ map = memblock_virt_alloc_node_nopanic(size,
++ pgdat->node_id);
++ pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
++ }
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++ /*
++ * With no DISCONTIG, the global mem_map is just set as node 0's
++ */
++ if (pgdat == NODE_DATA(0)) {
++ mem_map = NODE_DATA(0)->node_mem_map;
++#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
++ if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
++ mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
++#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
++ }
++#endif
++#endif /* CONFIG_FLAT_NODE_MEM_MAP */
++}
++
++void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
++ unsigned long node_start_pfn, unsigned long *zholes_size)
++{
++ pg_data_t *pgdat = NODE_DATA(nid);
++ unsigned long start_pfn = 0;
++ unsigned long end_pfn = 0;
++
++ /* pg_data_t should be reset to zero when it's allocated */
++ WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
++
++ pgdat->node_id = nid;
++ pgdat->node_start_pfn = node_start_pfn;
++#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
++ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
++ pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
++ (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
++#endif
++ calculate_node_totalpages(pgdat, start_pfn, end_pfn,
++ zones_size, zholes_size);
++
++ alloc_node_mem_map(pgdat);
++#ifdef CONFIG_FLAT_NODE_MEM_MAP
++ printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
++ nid, (unsigned long)pgdat,
++ (unsigned long)pgdat->node_mem_map);
++#endif
++
++ free_area_init_core(pgdat, start_pfn, end_pfn,
++ zones_size, zholes_size);
++}
++
++#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
++
++#if MAX_NUMNODES > 1
++/*
++ * Figure out the number of possible node ids.
++ */
++void __init setup_nr_node_ids(void)
++{
++ unsigned int node;
++ unsigned int highest = 0;
++
++ for_each_node_mask(node, node_possible_map)
++ highest = node;
++ nr_node_ids = highest + 1;
++}
++#endif
++
++/**
++ * node_map_pfn_alignment - determine the maximum internode alignment
++ *
++ * This function should be called after node map is populated and sorted.
++ * It calculates the maximum power of two alignment which can distinguish
++ * all the nodes.
++ *
++ * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
++ * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
++ * nodes are shifted by 256MiB, it would indicate 256MiB alignment. Note
++ * that if only the last node is shifted, 1GiB is enough and this
++ * function will indicate so.
++ *
++ * This is used to test whether pfn -> nid mapping of the chosen memory
++ * model has fine enough granularity to avoid incorrect mapping for the
++ * populated node map.
++ *
++ * Returns the determined alignment in pfn's. 0 if there is no alignment
++ * requirement (single node).
++ */
++unsigned long __init node_map_pfn_alignment(void)
++{
++ unsigned long accl_mask = 0, last_end = 0;
++ unsigned long start, end, mask;
++ int last_nid = -1;
++ int i, nid;
++
++ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
++ if (!start || last_nid < 0 || last_nid == nid) {
++ last_nid = nid;
++ last_end = end;
++ continue;
++ }
++
++ /*
++ * Start with a mask granular enough to pin-point to the
++ * start pfn and tick off bits one-by-one until it becomes
++ * too coarse to separate the current node from the last.
++ */
++ mask = ~((1 << __ffs(start)) - 1);
++ while (mask && last_end <= (start & (mask << 1)))
++ mask <<= 1;
++
++ /* accumulate all internode masks */
++ accl_mask |= mask;
++ }
++
++ /* convert mask to number of pages */
++ return ~accl_mask + 1;
++}
++
++/* Find the lowest pfn for a node */
++static unsigned long __init find_min_pfn_for_node(int nid)
++{
++ unsigned long min_pfn = ULONG_MAX;
++ unsigned long start_pfn;
++ int i;
++
++ for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
++ min_pfn = min(min_pfn, start_pfn);
++
++ if (min_pfn == ULONG_MAX) {
++ printk(KERN_WARNING
++ "Could not find start_pfn for node %d\n", nid);
++ return 0;
++ }
++
++ return min_pfn;
++}
++
++/**
++ * find_min_pfn_with_active_regions - Find the minimum PFN registered
++ *
++ * It returns the minimum PFN based on information provided via
++ * memblock_set_node().
++ */
++unsigned long __init find_min_pfn_with_active_regions(void)
++{
++ return find_min_pfn_for_node(MAX_NUMNODES);
++}
++
++/*
++ * early_calculate_totalpages()
++ * Sum pages in active regions for movable zone.
++ * Populate N_MEMORY for calculating usable_nodes.
++ */
++static unsigned long __init early_calculate_totalpages(void)
++{
++ unsigned long totalpages = 0;
++ unsigned long start_pfn, end_pfn;
++ int i, nid;
++
++ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
++ unsigned long pages = end_pfn - start_pfn;
++
++ totalpages += pages;
++ if (pages)
++ node_set_state(nid, N_MEMORY);
++ }
++ return totalpages;
++}
++
++/*
++ * Find the PFN the Movable zone begins in each node. Kernel memory
++ * is spread evenly between nodes as long as the nodes have enough
++ * memory. When they don't, some nodes will have more kernelcore than
++ * others
++ */
++static void __init find_zone_movable_pfns_for_nodes(void)
++{
++ int i, nid;
++ unsigned long usable_startpfn;
++ unsigned long kernelcore_node, kernelcore_remaining;
++ /* save the state before borrowing the nodemask */
++ nodemask_t saved_node_state = node_states[N_MEMORY];
++ unsigned long totalpages = early_calculate_totalpages();
++ int usable_nodes = nodes_weight(node_states[N_MEMORY]);
++ struct memblock_region *r;
++
++ /* Need to find movable_zone earlier when movable_node is specified. */
++ find_usable_zone_for_movable();
++
++ /*
++ * If movable_node is specified, ignore kernelcore and movablecore
++ * options.
++ */
++ if (movable_node_is_enabled()) {
++ for_each_memblock(memory, r) {
++ if (!memblock_is_hotpluggable(r))
++ continue;
++
++ nid = r->nid;
++
++ usable_startpfn = PFN_DOWN(r->base);
++ zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
++ min(usable_startpfn, zone_movable_pfn[nid]) :
++ usable_startpfn;
++ }
++
++ goto out2;
++ }
++
++ /*
++ * If movablecore=nn[KMG] was specified, calculate what size of
++ * kernelcore that corresponds to, so that memory usable for
++ * any allocation type is evenly spread. If both kernelcore
++ * and movablecore are specified, then the value of kernelcore
++ * will be used for required_kernelcore if it's greater than
++ * what movablecore would have allowed.
++ */
++ if (required_movablecore) {
++ unsigned long corepages;
++
++ /*
++ * Round-up so that ZONE_MOVABLE is at least as large as what
++ * was requested by the user
++ */
++ required_movablecore =
++ roundup(required_movablecore, MAX_ORDER_NR_PAGES);
++ corepages = totalpages - required_movablecore;
++
++ required_kernelcore = max(required_kernelcore, corepages);
++ }
++
++ /* If kernelcore was not specified, there is no ZONE_MOVABLE */
++ if (!required_kernelcore)
++ goto out;
++
++ /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
++ usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
++
++restart:
++ /* Spread kernelcore memory as evenly as possible throughout nodes */
++ kernelcore_node = required_kernelcore / usable_nodes;
++ for_each_node_state(nid, N_MEMORY) {
++ unsigned long start_pfn, end_pfn;
++
++ /*
++ * Recalculate kernelcore_node if the division per node
++ * now exceeds what is necessary to satisfy the requested
++ * amount of memory for the kernel
++ */
++ if (required_kernelcore < kernelcore_node)
++ kernelcore_node = required_kernelcore / usable_nodes;
++
++ /*
++ * As the map is walked, we track how much memory is usable
++ * by the kernel using kernelcore_remaining. When it is
++ * 0, the rest of the node is usable by ZONE_MOVABLE
++ */
++ kernelcore_remaining = kernelcore_node;
++
++ /* Go through each range of PFNs within this node */
++ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
++ unsigned long size_pages;
++
++ start_pfn = max(start_pfn, zone_movable_pfn[nid]);
++ if (start_pfn >= end_pfn)
++ continue;
++
++ /* Account for what is only usable for kernelcore */
++ if (start_pfn < usable_startpfn) {
++ unsigned long kernel_pages;
++ kernel_pages = min(end_pfn, usable_startpfn)
++ - start_pfn;
++
++ kernelcore_remaining -= min(kernel_pages,
++ kernelcore_remaining);
++ required_kernelcore -= min(kernel_pages,
++ required_kernelcore);
++
++ /* Continue if range is now fully accounted */
++ if (end_pfn <= usable_startpfn) {
++
++ /*
++ * Push zone_movable_pfn to the end so
++ * that if we have to rebalance
++ * kernelcore across nodes, we will
++ * not double account here
++ */
++ zone_movable_pfn[nid] = end_pfn;
++ continue;
++ }
++ start_pfn = usable_startpfn;
++ }
++
++ /*
++ * The usable PFN range for ZONE_MOVABLE is from
++ * start_pfn->end_pfn. Calculate size_pages as the
++ * number of pages used as kernelcore
++ */
++ size_pages = end_pfn - start_pfn;
++ if (size_pages > kernelcore_remaining)
++ size_pages = kernelcore_remaining;
++ zone_movable_pfn[nid] = start_pfn + size_pages;
++
++ /*
++ * Some kernelcore has been met, update counts and
++ * break if the kernelcore for this node has been
++ * satisfied
++ */
++ required_kernelcore -= min(required_kernelcore,
++ size_pages);
++ kernelcore_remaining -= size_pages;
++ if (!kernelcore_remaining)
++ break;
++ }
++ }
++
++ /*
++ * If there is still required_kernelcore, we do another pass with one
++ * less node in the count. This will push zone_movable_pfn[nid] further
++ * along on the nodes that still have memory until kernelcore is
++ * satisfied
++ */
++ usable_nodes--;
++ if (usable_nodes && required_kernelcore > usable_nodes)
++ goto restart;
++
++out2:
++ /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
++ for (nid = 0; nid < MAX_NUMNODES; nid++)
++ zone_movable_pfn[nid] =
++ roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
++
++out:
++ /* restore the node_state */
++ node_states[N_MEMORY] = saved_node_state;
++}
++
++/* Any regular or high memory on that node ? */
++static void check_for_memory(pg_data_t *pgdat, int nid)
++{
++ enum zone_type zone_type;
++
++ if (N_MEMORY == N_NORMAL_MEMORY)
++ return;
++
++ for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
++ struct zone *zone = &pgdat->node_zones[zone_type];
++ if (populated_zone(zone)) {
++ node_set_state(nid, N_HIGH_MEMORY);
++ if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
++ zone_type <= ZONE_NORMAL)
++ node_set_state(nid, N_NORMAL_MEMORY);
++ break;
++ }
++ }
++}
++
++/**
++ * free_area_init_nodes - Initialise all pg_data_t and zone data
++ * @max_zone_pfn: an array of max PFNs for each zone
++ *
++ * This will call free_area_init_node() for each active node in the system.
++ * Using the page ranges provided by memblock_set_node(), the size of each
++ * zone in each node and their holes is calculated. If the maximum PFN
++ * between two adjacent zones match, it is assumed that the zone is empty.
++ * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
++ * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
++ * starts where the previous one ended. For example, ZONE_DMA32 starts
++ * at arch_max_dma_pfn.
++ */
++void __init free_area_init_nodes(unsigned long *max_zone_pfn)
++{
++ unsigned long start_pfn, end_pfn;
++ int i, nid;
++
++ /* Record where the zone boundaries are */
++ memset(arch_zone_lowest_possible_pfn, 0,
++ sizeof(arch_zone_lowest_possible_pfn));
++ memset(arch_zone_highest_possible_pfn, 0,
++ sizeof(arch_zone_highest_possible_pfn));
++ arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
++ arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
++ for (i = 1; i < MAX_NR_ZONES; i++) {
++ if (i == ZONE_MOVABLE)
++ continue;
++ arch_zone_lowest_possible_pfn[i] =
++ arch_zone_highest_possible_pfn[i-1];
++ arch_zone_highest_possible_pfn[i] =
++ max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
++ }
++ arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
++ arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
++
++ /* Find the PFNs that ZONE_MOVABLE begins at in each node */
++ memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
++ find_zone_movable_pfns_for_nodes();
++
++ /* Print out the zone ranges */
++ pr_info("Zone ranges:\n");
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ if (i == ZONE_MOVABLE)
++ continue;
++ pr_info(" %-8s ", zone_names[i]);
++ if (arch_zone_lowest_possible_pfn[i] ==
++ arch_zone_highest_possible_pfn[i])
++ pr_cont("empty\n");
++ else
++ pr_cont("[mem %#018Lx-%#018Lx]\n",
++ (u64)arch_zone_lowest_possible_pfn[i]
++ << PAGE_SHIFT,
++ ((u64)arch_zone_highest_possible_pfn[i]
++ << PAGE_SHIFT) - 1);
++ }
++
++ /* Print out the PFNs ZONE_MOVABLE begins at in each node */
++ pr_info("Movable zone start for each node\n");
++ for (i = 0; i < MAX_NUMNODES; i++) {
++ if (zone_movable_pfn[i])
++ pr_info(" Node %d: %#018Lx\n", i,
++ (u64)zone_movable_pfn[i] << PAGE_SHIFT);
++ }
++
++ /* Print out the early node map */
++ pr_info("Early memory node ranges\n");
++ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
++ pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
++ (u64)start_pfn << PAGE_SHIFT,
++ ((u64)end_pfn << PAGE_SHIFT) - 1);
++
++ /* Initialise every node */
++ mminit_verify_pageflags_layout();
++ setup_nr_node_ids();
++ for_each_online_node(nid) {
++ pg_data_t *pgdat = NODE_DATA(nid);
++ free_area_init_node(nid, NULL,
++ find_min_pfn_for_node(nid), NULL);
++
++ /* Any memory on that node */
++ if (pgdat->node_present_pages)
++ node_set_state(nid, N_MEMORY);
++ check_for_memory(pgdat, nid);
++ }
++}
++
++static int __init cmdline_parse_core(char *p, unsigned long *core)
++{
++ unsigned long long coremem;
++ if (!p)
++ return -EINVAL;
++
++ coremem = memparse(p, &p);
++ *core = coremem >> PAGE_SHIFT;
++
++ /* Paranoid check that UL is enough for the coremem value */
++ WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
++
++ return 0;
++}
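++
++/*
++ * Worked example of the conversion above (illustrative only, assuming
++ * 4 KiB pages): booting with kernelcore=512M makes memparse() return
++ * 536870912, and shifting right by PAGE_SHIFT stores 131072 pages in
++ * *core.
++ */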
++
++/*
++ * kernelcore=size sets the amount of memory for use for allocations that
++ * cannot be reclaimed or migrated.
++ */
++static int __init cmdline_parse_kernelcore(char *p)
++{
++ return cmdline_parse_core(p, &required_kernelcore);
++}
++
++/*
++ * movablecore=size sets the amount of memory for use for allocations that
++ * can be reclaimed or migrated.
++ */
++static int __init cmdline_parse_movablecore(char *p)
++{
++ return cmdline_parse_core(p, &required_movablecore);
++}
++
++early_param("kernelcore", cmdline_parse_kernelcore);
++early_param("movablecore", cmdline_parse_movablecore);
++
++#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
++
++void adjust_managed_page_count(struct page *page, long count)
++{
++ spin_lock(&managed_page_count_lock);
++ page_zone(page)->managed_pages += count;
++ totalram_pages += count;
++#ifdef CONFIG_HIGHMEM
++ if (PageHighMem(page))
++ totalhigh_pages += count;
++#endif
++ spin_unlock(&managed_page_count_lock);
++}
++EXPORT_SYMBOL(adjust_managed_page_count);
++
++unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
++{
++ void *pos;
++ unsigned long pages = 0;
++
++ start = (void *)PAGE_ALIGN((unsigned long)start);
++ end = (void *)((unsigned long)end & PAGE_MASK);
++ for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
++ if ((unsigned int)poison <= 0xFF)
++ memset(pos, poison, PAGE_SIZE);
++ free_reserved_page(virt_to_page(pos));
++ }
++
++ if (pages && s)
++ pr_info("Freeing %s memory: %ldK (%p - %p)\n",
++ s, pages << (PAGE_SHIFT - 10), start, end);
++
++ return pages;
++}
++EXPORT_SYMBOL(free_reserved_area);
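++
++/*
++ * Usage sketch (hypothetical caller, not part of this file): a call such
++ * as free_reserved_area(start, end, -1, "initrd") releases the pages
++ * without poisoning them, because (unsigned int)-1 is larger than 0xFF,
++ * while passing 0 as the poison value would memset each page to zero
++ * before it is freed.
++ */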
++
++#ifdef CONFIG_HIGHMEM
++void free_highmem_page(struct page *page)
++{
++ __free_reserved_page(page);
++ totalram_pages++;
++ page_zone(page)->managed_pages++;
++ totalhigh_pages++;
++}
++#endif
++
++
++void __init mem_init_print_info(const char *str)
++{
++ unsigned long physpages, codesize, datasize, rosize, bss_size;
++ unsigned long init_code_size, init_data_size;
++
++ physpages = get_num_physpages();
++ codesize = _etext - _stext;
++ datasize = _edata - _sdata;
++ rosize = __end_rodata - __start_rodata;
++ bss_size = __bss_stop - __bss_start;
++ init_data_size = __init_end - __init_begin;
++ init_code_size = _einittext - _sinittext;
++
++ /*
++ * Detect special cases and adjust section sizes accordingly:
++ * 1) .init.* may be embedded into .data sections
++ * 2) .init.text.* may be out of [__init_begin, __init_end],
++ * please refer to arch/tile/kernel/vmlinux.lds.S.
++ * 3) .rodata.* may be embedded into .text or .data sections.
++ */
++#define adj_init_size(start, end, size, pos, adj) \
++ do { \
++ if (start <= pos && pos < end && size > adj) \
++ size -= adj; \
++ } while (0)
++
++ adj_init_size(__init_begin, __init_end, init_data_size,
++ _sinittext, init_code_size);
++ adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
++ adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
++ adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
++ adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
++
++#undef adj_init_size
++
++ pr_info("Memory: %luK/%luK available "
++ "(%luK kernel code, %luK rwdata, %luK rodata, "
++ "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
++#ifdef CONFIG_HIGHMEM
++ ", %luK highmem"
++#endif
++ "%s%s)\n",
++ nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
++ codesize >> 10, datasize >> 10, rosize >> 10,
++ (init_data_size + init_code_size) >> 10, bss_size >> 10,
++ (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
++ totalcma_pages << (PAGE_SHIFT-10),
++#ifdef CONFIG_HIGHMEM
++ totalhigh_pages << (PAGE_SHIFT-10),
++#endif
++ str ? ", " : "", str ? str : "");
++}
++
++/**
++ * set_dma_reserve - set the specified number of pages reserved in the first zone
++ * @new_dma_reserve: The number of pages to mark reserved
++ *
++ * The per-cpu batchsize and zone watermarks are determined by present_pages.
++ * In the DMA zone, a significant percentage may be consumed by kernel image
++ * and other unfreeable allocations which can skew the watermarks badly. This
++ * function may optionally be used to account for unfreeable pages in the
++ * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
++ * smaller per-cpu batchsize.
++ */
++void __init set_dma_reserve(unsigned long new_dma_reserve)
++{
++ dma_reserve = new_dma_reserve;
++}
++
++void __init free_area_init(unsigned long *zones_size)
++{
++ free_area_init_node(0, zones_size,
++ __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
++}
++
++static int page_alloc_cpu_notify(struct notifier_block *self,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (unsigned long)hcpu;
++
++ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
++ lru_add_drain_cpu(cpu);
++ drain_pages(cpu);
++
++ /*
++ * Spill the event counters of the dead processor
++ * into the current processors event counters.
++ * This artificially elevates the count of the current
++ * processor.
++ */
++ vm_events_fold_cpu(cpu);
++
++ /*
++ * Zero the differential counters of the dead processor
++ * so that the vm statistics are consistent.
++ *
++ * This is only okay since the processor is dead and cannot
++ * race with what we are doing.
++ */
++ cpu_vm_stats_fold(cpu);
++ }
++ return NOTIFY_OK;
++}
++
++void __init page_alloc_init(void)
++{
++ hotcpu_notifier(page_alloc_cpu_notify, 0);
++}
++
++/*
++ * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
++ * or min_free_kbytes changes.
++ */
++static void calculate_totalreserve_pages(void)
++{
++ struct pglist_data *pgdat;
++ unsigned long reserve_pages = 0;
++ enum zone_type i, j;
++
++ for_each_online_pgdat(pgdat) {
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ struct zone *zone = pgdat->node_zones + i;
++ long max = 0;
++
++ /* Find valid and maximum lowmem_reserve in the zone */
++ for (j = i; j < MAX_NR_ZONES; j++) {
++ if (zone->lowmem_reserve[j] > max)
++ max = zone->lowmem_reserve[j];
++ }
++
++ /* we treat the high watermark as reserved pages. */
++ max += high_wmark_pages(zone);
++
++ if (max > zone->managed_pages)
++ max = zone->managed_pages;
++ reserve_pages += max;
++ /*
++ * Lowmem reserves are not available to
++ * GFP_HIGHUSER page cache allocations and
++ * kswapd tries to balance zones to their high
++ * watermark. As a result, neither should be
++ * regarded as dirtyable memory, to prevent a
++ * situation where reclaim has to clean pages
++ * in order to balance the zones.
++ */
++ zone->dirty_balance_reserve = max;
++ }
++ }
++ dirty_balance_reserve = reserve_pages;
++ totalreserve_pages = reserve_pages;
++}
++
++/*
++ * setup_per_zone_lowmem_reserve - called whenever
++ * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
++ * has a correct pages reserved value, so an adequate number of
++ * pages are left in the zone after a successful __alloc_pages().
++ */
++static void setup_per_zone_lowmem_reserve(void)
++{
++ struct pglist_data *pgdat;
++ enum zone_type j, idx;
++
++ for_each_online_pgdat(pgdat) {
++ for (j = 0; j < MAX_NR_ZONES; j++) {
++ struct zone *zone = pgdat->node_zones + j;
++ unsigned long managed_pages = zone->managed_pages;
++
++ zone->lowmem_reserve[j] = 0;
++
++ idx = j;
++ while (idx) {
++ struct zone *lower_zone;
++
++ idx--;
++
++ if (sysctl_lowmem_reserve_ratio[idx] < 1)
++ sysctl_lowmem_reserve_ratio[idx] = 1;
++
++ lower_zone = pgdat->node_zones + idx;
++ lower_zone->lowmem_reserve[j] = managed_pages /
++ sysctl_lowmem_reserve_ratio[idx];
++ managed_pages += lower_zone->managed_pages;
++ }
++ }
++ }
++
++ /* update totalreserve_pages */
++ calculate_totalreserve_pages();
++}
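++
++/*
++ * Illustrative numbers for the loop above (assuming a two-zone DMA/NORMAL
++ * node and the default sysctl_lowmem_reserve_ratio of 256 for the lowest
++ * zone): if ZONE_NORMAL has 262144 managed pages, ZONE_DMA ends up with
++ * lowmem_reserve[ZONE_NORMAL] = 262144 / 256 = 1024 pages held back from
++ * allocations that could also have been satisfied from ZONE_NORMAL.
++ */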
++
++static void __setup_per_zone_wmarks(void)
++{
++ unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
++ unsigned long lowmem_pages = 0;
++ struct zone *zone;
++ unsigned long flags;
++
++ /* Calculate total number of !ZONE_HIGHMEM pages */
++ for_each_zone(zone) {
++ if (!is_highmem(zone))
++ lowmem_pages += zone->managed_pages;
++ }
++
++ for_each_zone(zone) {
++ u64 tmp;
++
++ spin_lock_irqsave(&zone->lock, flags);
++ tmp = (u64)pages_min * zone->managed_pages;
++ do_div(tmp, lowmem_pages);
++ if (is_highmem(zone)) {
++ /*
++ * __GFP_HIGH and PF_MEMALLOC allocations usually don't
++ * need highmem pages, so cap pages_min to a small
++ * value here.
++ *
++ * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
++ * deltas control asynchronous page reclaim, and so should
++ * not be capped for highmem.
++ */
++ unsigned long min_pages;
++
++ min_pages = zone->managed_pages / 1024;
++ min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
++ zone->watermark[WMARK_MIN] = min_pages;
++ } else {
++ /*
++ * If it's a lowmem zone, reserve a number of pages
++ * proportionate to the zone's size.
++ */
++ zone->watermark[WMARK_MIN] = tmp;
++ }
++
++ zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
++ zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
++
++ __mod_zone_page_state(zone, NR_ALLOC_BATCH,
++ high_wmark_pages(zone) - low_wmark_pages(zone) -
++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
++
++ setup_zone_migrate_reserve(zone);
++ spin_unlock_irqrestore(&zone->lock, flags);
++ }
++
++ /* update totalreserve_pages */
++ calculate_totalreserve_pages();
++}
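++
++/*
++ * Illustrative numbers for the scaling above (assuming 4 KiB pages and a
++ * single lowmem zone): with min_free_kbytes = 4096, pages_min is 1024,
++ * so the zone gets WMARK_MIN = 1024, WMARK_LOW = 1024 + 256 and
++ * WMARK_HIGH = 1024 + 512 pages.
++ */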
++
++/**
++ * setup_per_zone_wmarks - called when min_free_kbytes changes
++ * or when memory is hot-{added|removed}
++ *
++ * Ensures that the watermark[min,low,high] values for each zone are set
++ * correctly with respect to min_free_kbytes.
++ */
++void setup_per_zone_wmarks(void)
++{
++ mutex_lock(&zonelists_mutex);
++ __setup_per_zone_wmarks();
++ mutex_unlock(&zonelists_mutex);
++}
++
++/*
++ * The inactive anon list should be small enough that the VM never has to
++ * do too much work, but large enough that each inactive page has a chance
++ * to be referenced again before it is swapped out.
++ *
++ * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
++ * INACTIVE_ANON pages on this zone's LRU, maintained by the
++ * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
++ * the anonymous pages are kept on the inactive list.
++ *
++ * total target max
++ * memory ratio inactive anon
++ * -------------------------------------
++ * 10MB 1 5MB
++ * 100MB 1 50MB
++ * 1GB 3 250MB
++ * 10GB 10 0.9GB
++ * 100GB 31 3GB
++ * 1TB 101 10GB
++ * 10TB 320 32GB
++ */
++static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
++{
++ unsigned int gb, ratio;
++
++ /* Zone size in gigabytes */
++ gb = zone->managed_pages >> (30 - PAGE_SHIFT);
++ if (gb)
++ ratio = int_sqrt(10 * gb);
++ else
++ ratio = 1;
++
++ zone->inactive_ratio = ratio;
++}
++
++static void __meminit setup_per_zone_inactive_ratio(void)
++{
++ struct zone *zone;
++
++ for_each_zone(zone)
++ calculate_zone_inactive_ratio(zone);
++}
++
++/*
++ * Initialise min_free_kbytes.
++ *
++ * For small machines we want it small (128k min). For large machines
++ * we want it large (64MB max). But it is not linear, because network
++ * bandwidth does not increase linearly with machine size. We use
++ *
++ * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
++ * min_free_kbytes = sqrt(lowmem_kbytes * 16)
++ *
++ * which yields
++ *
++ * 16MB: 512k
++ * 32MB: 724k
++ * 64MB: 1024k
++ * 128MB: 1448k
++ * 256MB: 2048k
++ * 512MB: 2896k
++ * 1024MB: 4096k
++ * 2048MB: 5792k
++ * 4096MB: 8192k
++ * 8192MB: 11584k
++ * 16384MB: 16384k
++ */
++int __meminit init_per_zone_wmark_min(void)
++{
++ unsigned long lowmem_kbytes;
++ int new_min_free_kbytes;
++
++ lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
++ new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
++
++ if (new_min_free_kbytes > user_min_free_kbytes) {
++ min_free_kbytes = new_min_free_kbytes;
++ if (min_free_kbytes < 128)
++ min_free_kbytes = 128;
++ if (min_free_kbytes > 65536)
++ min_free_kbytes = 65536;
++ } else {
++ pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
++ new_min_free_kbytes, user_min_free_kbytes);
++ }
++ setup_per_zone_wmarks();
++ refresh_zone_stat_thresholds();
++ setup_per_zone_lowmem_reserve();
++ setup_per_zone_inactive_ratio();
++ return 0;
++}
++module_init(init_per_zone_wmark_min)
++
++/*
++ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
++ * that we can call two helper functions whenever min_free_kbytes
++ * changes.
++ */
++int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *length, loff_t *ppos)
++{
++ int rc;
++
++ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
++ if (rc)
++ return rc;
++
++ if (write) {
++ user_min_free_kbytes = min_free_kbytes;
++ setup_per_zone_wmarks();
++ }
++ return 0;
++}
++
++#ifdef CONFIG_NUMA
++int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *length, loff_t *ppos)
++{
++ struct zone *zone;
++ int rc;
++
++ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
++ if (rc)
++ return rc;
++
++ for_each_zone(zone)
++ zone->min_unmapped_pages = (zone->managed_pages *
++ sysctl_min_unmapped_ratio) / 100;
++ return 0;
++}
++
++int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *length, loff_t *ppos)
++{
++ struct zone *zone;
++ int rc;
++
++ rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
++ if (rc)
++ return rc;
++
++ for_each_zone(zone)
++ zone->min_slab_pages = (zone->managed_pages *
++ sysctl_min_slab_ratio) / 100;
++ return 0;
++}
++#endif
++
++/*
++ * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
++ * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
++ * whenever sysctl_lowmem_reserve_ratio changes.
++ *
++ * The reserve ratio obviously has absolutely no relation with the
++ * minimum watermarks. The lowmem reserve ratio only makes sense as a
++ * function of the boot-time zone sizes.
++ */
++int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *length, loff_t *ppos)
++{
++ proc_dointvec_minmax(table, write, buffer, length, ppos);
++ setup_per_zone_lowmem_reserve();
++ return 0;
++}
++
++/*
++ * percpu_pagelist_fraction - changes the pcp->high for each zone on each
++ * cpu. It is the fraction of total pages in each zone that a hot per cpu
++ * pagelist can have before it gets flushed back to buddy allocator.
++ */
++int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *length, loff_t *ppos)
++{
++ struct zone *zone;
++ int old_percpu_pagelist_fraction;
++ int ret;
++
++ mutex_lock(&pcp_batch_high_lock);
++ old_percpu_pagelist_fraction = percpu_pagelist_fraction;
++
++ ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
++ if (!write || ret < 0)
++ goto out;
++
++ /* Sanity checking to avoid pcp imbalance */
++ if (percpu_pagelist_fraction &&
++ percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
++ percpu_pagelist_fraction = old_percpu_pagelist_fraction;
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* No change? */
++ if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
++ goto out;
++
++ for_each_populated_zone(zone) {
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu)
++ pageset_set_high_and_batch(zone,
++ per_cpu_ptr(zone->pageset, cpu));
++ }
++out:
++ mutex_unlock(&pcp_batch_high_lock);
++ return ret;
++}
++
++int hashdist = HASHDIST_DEFAULT;
++
++#ifdef CONFIG_NUMA
++static int __init set_hashdist(char *str)
++{
++ if (!str)
++ return 0;
++ hashdist = simple_strtoul(str, &str, 0);
++ return 1;
++}
++__setup("hashdist=", set_hashdist);
++#endif
++
++/*
++ * allocate a large system hash table from bootmem
++ * - it is assumed that the hash table must contain an exact power-of-2
++ * quantity of entries
++ * - limit is the number of hash buckets, not the total allocation size
++ */
++void *__init alloc_large_system_hash(const char *tablename,
++ unsigned long bucketsize,
++ unsigned long numentries,
++ int scale,
++ int flags,
++ unsigned int *_hash_shift,
++ unsigned int *_hash_mask,
++ unsigned long low_limit,
++ unsigned long high_limit)
++{
++ unsigned long long max = high_limit;
++ unsigned long log2qty, size;
++ void *table = NULL;
++
++ /* allow the kernel cmdline to have a say */
++ if (!numentries) {
++ /* round applicable memory size up to nearest megabyte */
++ numentries = nr_kernel_pages;
++
++ /* It isn't necessary when PAGE_SIZE >= 1MB */
++ if (PAGE_SHIFT < 20)
++ numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
++
++ /* limit to 1 bucket per 2^scale bytes of low memory */
++ if (scale > PAGE_SHIFT)
++ numentries >>= (scale - PAGE_SHIFT);
++ else
++ numentries <<= (PAGE_SHIFT - scale);
++
++ /* Make sure we've got at least a 0-order allocation.. */
++ if (unlikely(flags & HASH_SMALL)) {
++ /* Makes no sense without HASH_EARLY */
++ WARN_ON(!(flags & HASH_EARLY));
++ if (!(numentries >> *_hash_shift)) {
++ numentries = 1UL << *_hash_shift;
++ BUG_ON(!numentries);
++ }
++ } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
++ numentries = PAGE_SIZE / bucketsize;
++ }
++ numentries = roundup_pow_of_two(numentries);
++
++ /* limit allocation size to 1/16 total memory by default */
++ if (max == 0) {
++ max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
++ do_div(max, bucketsize);
++ }
++ max = min(max, 0x80000000ULL);
++
++ if (numentries < low_limit)
++ numentries = low_limit;
++ if (numentries > max)
++ numentries = max;
++
++ log2qty = ilog2(numentries);
++
++ do {
++ size = bucketsize << log2qty;
++ if (flags & HASH_EARLY)
++ table = memblock_virt_alloc_nopanic(size, 0);
++ else if (hashdist)
++ table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
++ else {
++ /*
++ * If bucketsize is not a power-of-two, we may free
++ * some pages at the end of the hash table, which
++ * alloc_pages_exact() does automatically.
++ */
++ if (get_order(size) < MAX_ORDER) {
++ table = alloc_pages_exact(size, GFP_ATOMIC);
++ kmemleak_alloc(table, size, 1, GFP_ATOMIC);
++ }
++ }
++ } while (!table && size > PAGE_SIZE && --log2qty);
++
++ if (!table)
++ panic("Failed to allocate %s hash table\n", tablename);
++
++ printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
++ tablename,
++ (1UL << log2qty),
++ ilog2(size) - PAGE_SHIFT,
++ size);
++
++ if (_hash_shift)
++ *_hash_shift = log2qty;
++ if (_hash_mask)
++ *_hash_mask = (1 << log2qty) - 1;
++
++ return table;
++}
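++
++/*
++ * Sizing sketch with assumed inputs (none of these numbers come from a
++ * real caller): with nr_kernel_pages = 262144 (1 GiB of 4 KiB pages),
++ * numentries = 0, scale = 14 and bucketsize = 8, the default numentries
++ * becomes 262144 >> 2 = 65536, log2qty is 16 and the first table size
++ * tried is 8 << 16 bytes = 512 KiB.
++ */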
++
++/* Return a pointer to the bitmap storing bits affecting a block of pages */
++static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
++ unsigned long pfn)
++{
++#ifdef CONFIG_SPARSEMEM
++ return __pfn_to_section(pfn)->pageblock_flags;
++#else
++ return zone->pageblock_flags;
++#endif /* CONFIG_SPARSEMEM */
++}
++
++static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
++{
++#ifdef CONFIG_SPARSEMEM
++ pfn &= (PAGES_PER_SECTION-1);
++ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
++#else
++ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
++ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
++#endif /* CONFIG_SPARSEMEM */
++}
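++
++/*
++ * Index math sketch (assuming SPARSEMEM with 32768 pages per section,
++ * 64-bit longs and pageblock_order == 9): pfn 0x1200 is unchanged by the
++ * masking with PAGES_PER_SECTION - 1, lands in pageblock 9, so bitidx is
++ * 9 * NR_PAGEBLOCK_BITS = 36, word_bitidx is 0 and the bit offset inside
++ * that word is 36.
++ */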
++
++/**
++ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
++ * @page: The page within the block of interest
++ * @pfn: The target page frame number
++ * @end_bitidx: The last bit of interest to retrieve
++ * @mask: mask of bits that the caller is interested in
++ *
++ * Return: pageblock_bits flags
++ */
++unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
++ unsigned long end_bitidx,
++ unsigned long mask)
++{
++ struct zone *zone;
++ unsigned long *bitmap;
++ unsigned long bitidx, word_bitidx;
++ unsigned long word;
++
++ zone = page_zone(page);
++ bitmap = get_pageblock_bitmap(zone, pfn);
++ bitidx = pfn_to_bitidx(zone, pfn);
++ word_bitidx = bitidx / BITS_PER_LONG;
++ bitidx &= (BITS_PER_LONG-1);
++
++ word = bitmap[word_bitidx];
++ bitidx += end_bitidx;
++ return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
++}
++
++/**
++ * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
++ * @page: The page within the block of interest
++ * @flags: The flags to set
++ * @pfn: The target page frame number
++ * @end_bitidx: The last bit of interest
++ * @mask: mask of bits that the caller is interested in
++ */
++void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
++ unsigned long pfn,
++ unsigned long end_bitidx,
++ unsigned long mask)
++{
++ struct zone *zone;
++ unsigned long *bitmap;
++ unsigned long bitidx, word_bitidx;
++ unsigned long old_word, word;
++
++ BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
++
++ zone = page_zone(page);
++ bitmap = get_pageblock_bitmap(zone, pfn);
++ bitidx = pfn_to_bitidx(zone, pfn);
++ word_bitidx = bitidx / BITS_PER_LONG;
++ bitidx &= (BITS_PER_LONG-1);
++
++ VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
++
++ bitidx += end_bitidx;
++ mask <<= (BITS_PER_LONG - bitidx - 1);
++ flags <<= (BITS_PER_LONG - bitidx - 1);
++
++ word = READ_ONCE(bitmap[word_bitidx]);
++ for (;;) {
++ old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
++ if (word == old_word)
++ break;
++ word = old_word;
++ }
++}
++
++/*
++ * This function checks whether the pageblock includes unmovable pages or not.
++ * If @count is not zero, it is okay to include up to @count unmovable pages.
++ *
++ * The PageLRU check without isolation or the lru_lock could race, so a
++ * MIGRATE_MOVABLE block might include unmovable pages; this function
++ * therefore cannot be expected to be exact.
++ */
++bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
++ bool skip_hwpoisoned_pages)
++{
++ unsigned long pfn, iter, found;
++ int mt;
++
++ /*
++ * To avoid noisy data, lru_add_drain_all() should be called first.
++ * If the zone is ZONE_MOVABLE, it never contains unmovable pages.
++ */
++ if (zone_idx(zone) == ZONE_MOVABLE)
++ return false;
++ mt = get_pageblock_migratetype(page);
++ if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
++ return false;
++
++ pfn = page_to_pfn(page);
++ for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
++ unsigned long check = pfn + iter;
++
++ if (!pfn_valid_within(check))
++ continue;
++
++ page = pfn_to_page(check);
++
++ /*
++ * Hugepages are not in LRU lists, but they're movable.
++ * We need not scan over tail pages because we don't
++ * handle each tail page individually in migration.
++ */
++ if (PageHuge(page)) {
++ iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
++ continue;
++ }
++
++ /*
++ * We can't use page_count() without pinning the page,
++ * because another CPU can free the compound page.
++ * This check already skips compound tails of THP
++ * because their page->_count is zero at all times.
++ */
++ if (!atomic_read(&page->_count)) {
++ if (PageBuddy(page))
++ iter += (1 << page_order(page)) - 1;
++ continue;
++ }
++
++ /*
++ * The HWPoisoned page may be not in buddy system, and
++ * page_count() is not 0.
++ */
++ if (skip_hwpoisoned_pages && PageHWPoison(page))
++ continue;
++
++ if (!PageLRU(page))
++ found++;
++ /*
++ * If there are RECLAIMABLE pages, we need to check
++ * them. But for now, memory offlining itself doesn't call
++ * shrink_node_slabs(), and this still needs to be fixed.
++ */
++ /*
++ * If the page is not RAM, page_count() should be 0 and
++ * we don't need any further checks; this is a _used_, non-movable page.
++ *
++ * The problematic thing here is PG_reserved pages. PG_reserved
++ * is set to both of a memory hole page and a _used_ kernel
++ * page at boot.
++ */
++ if (found > count)
++ return true;
++ }
++ return false;
++}
++
++bool is_pageblock_removable_nolock(struct page *page)
++{
++ struct zone *zone;
++ unsigned long pfn;
++
++ /*
++ * We have to be careful here because we are iterating over memory
++ * sections which are not zone aware so we might end up outside of
++ * the zone but still within the section.
++ * We have to take care about the node as well. If the node is offline
++ * its NODE_DATA will be NULL - see page_zone.
++ */
++ if (!node_online(page_to_nid(page)))
++ return false;
++
++ zone = page_zone(page);
++ pfn = page_to_pfn(page);
++ if (!zone_spans_pfn(zone, pfn))
++ return false;
++
++ return !has_unmovable_pages(zone, page, 0, true);
++}
++
++#ifdef CONFIG_CMA
++
++static unsigned long pfn_max_align_down(unsigned long pfn)
++{
++ return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
++ pageblock_nr_pages) - 1);
++}
++
++static unsigned long pfn_max_align_up(unsigned long pfn)
++{
++ return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
++ pageblock_nr_pages));
++}
++
++/* [start, end) must belong to a single zone. */
++static int __alloc_contig_migrate_range(struct compact_control *cc,
++ unsigned long start, unsigned long end)
++{
++ /* This function is based on compact_zone() from compaction.c. */
++ unsigned long nr_reclaimed;
++ unsigned long pfn = start;
++ unsigned int tries = 0;
++ int ret = 0;
++
++ migrate_prep();
++
++ while (pfn < end || !list_empty(&cc->migratepages)) {
++ if (fatal_signal_pending(current)) {
++ ret = -EINTR;
++ break;
++ }
++
++ if (list_empty(&cc->migratepages)) {
++ cc->nr_migratepages = 0;
++ pfn = isolate_migratepages_range(cc, pfn, end);
++ if (!pfn) {
++ ret = -EINTR;
++ break;
++ }
++ tries = 0;
++ } else if (++tries == 5) {
++ ret = ret < 0 ? ret : -EBUSY;
++ break;
++ }
++
++ nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
++ &cc->migratepages);
++ cc->nr_migratepages -= nr_reclaimed;
++
++ ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
++ NULL, 0, cc->mode, MR_CMA);
++ }
++ if (ret < 0) {
++ putback_movable_pages(&cc->migratepages);
++ return ret;
++ }
++ return 0;
++}
++
++/**
++ * alloc_contig_range() -- tries to allocate given range of pages
++ * @start: start PFN to allocate
++ * @end: one-past-the-last PFN to allocate
++ * @migratetype: migratetype of the underlying pageblocks (either
++ * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
++ * in range must have the same migratetype and it must
++ * be either of the two.
++ *
++ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
++ * aligned, however it's the caller's responsibility to guarantee that
++ * we are the only thread that changes migrate type of pageblocks the
++ * pages fall in.
++ *
++ * The PFN range must belong to a single zone.
++ *
++ * Returns zero on success or negative error code. On success all
++ * pages whose PFN is in [start, end) are allocated for the caller and
++ * need to be freed with free_contig_range().
++ */
++int alloc_contig_range(unsigned long start, unsigned long end,
++ unsigned migratetype)
++{
++ unsigned long outer_start, outer_end;
++ int ret = 0, order;
++
++ struct compact_control cc = {
++ .nr_migratepages = 0,
++ .order = -1,
++ .zone = page_zone(pfn_to_page(start)),
++ .mode = MIGRATE_SYNC,
++ .ignore_skip_hint = true,
++ };
++ INIT_LIST_HEAD(&cc.migratepages);
++
++ /*
++ * What we do here is we mark all pageblocks in range as
++ * MIGRATE_ISOLATE. Because pageblock and max order pages may
++ * have different sizes, and due to the way the page allocator
++ * works, we align the range to the bigger of the two sizes so
++ * that the page allocator won't try to merge buddies from
++ * different pageblocks and change MIGRATE_ISOLATE to some
++ * other migration type.
++ *
++ * Once the pageblocks are marked as MIGRATE_ISOLATE, we
++ * migrate the pages from an unaligned range (ie. pages that
++ * we are interested in). This will put all the pages in
++ * range back to page allocator as MIGRATE_ISOLATE.
++ *
++ * When this is done, we take the pages in range from page
++ * allocator removing them from the buddy system. This way
++ * page allocator will never consider using them.
++ *
++ * This lets us mark the pageblocks back as
++ * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
++ * aligned range but not in the unaligned, original range are
++ * put back to page allocator so that buddy can use them.
++ */
++
++ ret = start_isolate_page_range(pfn_max_align_down(start),
++ pfn_max_align_up(end), migratetype,
++ false);
++ if (ret)
++ return ret;
++
++ ret = __alloc_contig_migrate_range(&cc, start, end);
++ if (ret)
++ goto done;
++
++ /*
++ * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
++ * aligned blocks that are marked as MIGRATE_ISOLATE. What's
++ * more, all pages in [start, end) are free in page allocator.
++ * What we are going to do is to allocate all pages from
++ * [start, end) (that is remove them from page allocator).
++ *
++ * The only problem is that pages at the beginning and at the
++ * end of the interesting range may not be aligned with pages that
++ * the page allocator holds, i.e. they can be part of higher-order
++ * pages. Because of this, we reserve the bigger range and
++ * once this is done free the pages we are not interested in.
++ *
++ * We don't have to hold zone->lock here because the pages are
++ * isolated thus they won't get removed from buddy.
++ */
++
++ lru_add_drain_all();
++ drain_all_pages(cc.zone);
++
++ order = 0;
++ outer_start = start;
++ while (!PageBuddy(pfn_to_page(outer_start))) {
++ if (++order >= MAX_ORDER) {
++ ret = -EBUSY;
++ goto done;
++ }
++ outer_start &= ~0UL << order;
++ }
++
++ /* Make sure the range is really isolated. */
++ if (test_pages_isolated(outer_start, end, false)) {
++ pr_info("%s: [%lx, %lx) PFNs busy\n",
++ __func__, outer_start, end);
++ ret = -EBUSY;
++ goto done;
++ }
++
++ /* Grab isolated pages from freelists. */
++ outer_end = isolate_freepages_range(&cc, outer_start, end);
++ if (!outer_end) {
++ ret = -EBUSY;
++ goto done;
++ }
++
++ /* Free head and tail (if any) */
++ if (start != outer_start)
++ free_contig_range(outer_start, start - outer_start);
++ if (end != outer_end)
++ free_contig_range(end, outer_end - end);
++
++done:
++ undo_isolate_page_range(pfn_max_align_down(start),
++ pfn_max_align_up(end), migratetype);
++ return ret;
++}
++
++void free_contig_range(unsigned long pfn, unsigned nr_pages)
++{
++ unsigned int count = 0;
++
++ for (; nr_pages--; pfn++) {
++ struct page *page = pfn_to_page(pfn);
++
++ count += page_count(page) != 1;
++ __free_page(page);
++ }
++ WARN(count != 0, "%d pages are still in use!\n", count);
++}
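++
++/*
++ * Typical caller pattern (sketch only, no such caller exists in this
++ * file): a CMA-style user would do
++ *
++ *	err = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
++ *	if (!err) {
++ *		use pfn_to_page(pfn) .. pfn_to_page(pfn + nr_pages - 1)
++ *		free_contig_range(pfn, nr_pages);
++ *	}
++ */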
++#endif
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++/*
++ * The zone indicated has a new number of managed_pages; batch sizes and percpu
++ * page high values need to be recalculated.
++ */
++void __meminit zone_pcp_update(struct zone *zone)
++{
++ unsigned cpu;
++ mutex_lock(&pcp_batch_high_lock);
++ for_each_possible_cpu(cpu)
++ pageset_set_high_and_batch(zone,
++ per_cpu_ptr(zone->pageset, cpu));
++ mutex_unlock(&pcp_batch_high_lock);
++}
++#endif
++
++void zone_pcp_reset(struct zone *zone)
++{
++ unsigned long flags;
++ int cpu;
++ struct per_cpu_pageset *pset;
++
++ /* avoid races with drain_pages() */
++ local_irq_save(flags);
++ if (zone->pageset != &boot_pageset) {
++ for_each_online_cpu(cpu) {
++ pset = per_cpu_ptr(zone->pageset, cpu);
++ drain_zonestat(zone, pset);
++ }
++ free_percpu(zone->pageset);
++ zone->pageset = &boot_pageset;
++ }
++ local_irq_restore(flags);
++}
++
++#ifdef CONFIG_MEMORY_HOTREMOVE
++/*
++ * All pages in the range must be isolated before calling this.
++ */
++void
++__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
++{
++ struct page *page;
++ struct zone *zone;
++ unsigned int order, i;
++ unsigned long pfn;
++ unsigned long flags;
++ /* find the first valid pfn */
++ for (pfn = start_pfn; pfn < end_pfn; pfn++)
++ if (pfn_valid(pfn))
++ break;
++ if (pfn == end_pfn)
++ return;
++ zone = page_zone(pfn_to_page(pfn));
++ spin_lock_irqsave(&zone->lock, flags);
++ pfn = start_pfn;
++ while (pfn < end_pfn) {
++ if (!pfn_valid(pfn)) {
++ pfn++;
++ continue;
++ }
++ page = pfn_to_page(pfn);
++ /*
++ * The HWPoisoned page may be not in buddy system, and
++ * page_count() is not 0.
++ */
++ if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
++ pfn++;
++ SetPageReserved(page);
++ continue;
++ }
++
++ BUG_ON(page_count(page));
++ BUG_ON(!PageBuddy(page));
++ order = page_order(page);
++#ifdef CONFIG_DEBUG_VM
++ printk(KERN_INFO "remove from free list %lx %d %lx\n",
++ pfn, 1 << order, end_pfn);
++#endif
++ list_del(&page->lru);
++ rmv_page_order(page);
++ zone->free_area[order].nr_free--;
++ for (i = 0; i < (1 << order); i++)
++ SetPageReserved((page+i));
++ pfn += (1 << order);
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++#endif
++
++#ifdef CONFIG_MEMORY_FAILURE
++bool is_free_buddy_page(struct page *page)
++{
++ struct zone *zone = page_zone(page);
++ unsigned long pfn = page_to_pfn(page);
++ unsigned long flags;
++ unsigned int order;
++
++ spin_lock_irqsave(&zone->lock, flags);
++ for (order = 0; order < MAX_ORDER; order++) {
++ struct page *page_head = page - (pfn & ((1 << order) - 1));
++
++ if (PageBuddy(page_head) && page_order(page_head) >= order)
++ break;
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++
++ return order < MAX_ORDER;
++}
++#endif
+diff -Nur linux-4.1.10.orig/mm/slab.h linux-4.1.10/mm/slab.h
+--- linux-4.1.10.orig/mm/slab.h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/slab.h 2015-10-07 18:00:08.000000000 +0200
@@ -330,7 +330,11 @@
* The slab lists for all objects.
*/
@@ -25179,9 +56094,9 @@ diff -Nur linux-4.1.6.orig/mm/slab.h linux-4.1.6/mm/slab.h
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
-diff -Nur linux-4.1.6.orig/mm/slub.c linux-4.1.6/mm/slub.c
---- linux-4.1.6.orig/mm/slub.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/slub.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/slub.c linux-4.1.10/mm/slub.c
+--- linux-4.1.10.orig/mm/slub.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/slub.c 2015-10-07 18:00:08.000000000 +0200
@@ -1069,7 +1069,7 @@
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -25320,7 +56235,7 @@ diff -Nur linux-4.1.6.orig/mm/slub.c linux-4.1.6/mm/slub.c
- inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
@@ -1448,10 +1430,34 @@
page->freelist = start;
page->inuse = page->objects;
@@ -25666,9 +56581,5357 @@ diff -Nur linux-4.1.6.orig/mm/slub.c linux-4.1.6/mm/slub.c
}
for (i = 0; i < t.count; i++) {
-diff -Nur linux-4.1.6.orig/mm/swap.c linux-4.1.6/mm/swap.c
---- linux-4.1.6.orig/mm/swap.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/swap.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/slub.c.orig linux-4.1.10/mm/slub.c.orig
+--- linux-4.1.10.orig/mm/slub.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/mm/slub.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,5344 @@
++/*
++ * SLUB: A slab allocator that limits cache line use instead of queuing
++ * objects in per cpu and per node lists.
++ *
++ * The allocator synchronizes using per slab locks or atomic operations
++ * and only uses a centralized lock to manage a pool of partial slabs.
++ *
++ * (C) 2007 SGI, Christoph Lameter
++ * (C) 2011 Linux Foundation, Christoph Lameter
++ */
++
++#include <linux/mm.h>
++#include <linux/swap.h> /* struct reclaim_state */
++#include <linux/module.h>
++#include <linux/bit_spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/bitops.h>
++#include <linux/slab.h>
++#include "slab.h"
++#include <linux/proc_fs.h>
++#include <linux/notifier.h>
++#include <linux/seq_file.h>
++#include <linux/kasan.h>
++#include <linux/kmemcheck.h>
++#include <linux/cpu.h>
++#include <linux/cpuset.h>
++#include <linux/mempolicy.h>
++#include <linux/ctype.h>
++#include <linux/debugobjects.h>
++#include <linux/kallsyms.h>
++#include <linux/memory.h>
++#include <linux/math64.h>
++#include <linux/fault-inject.h>
++#include <linux/stacktrace.h>
++#include <linux/prefetch.h>
++#include <linux/memcontrol.h>
++
++#include <trace/events/kmem.h>
++
++#include "internal.h"
++
++/*
++ * Lock order:
++ * 1. slab_mutex (Global Mutex)
++ * 2. node->list_lock
++ * 3. slab_lock(page) (Only on some arches and for debugging)
++ *
++ * slab_mutex
++ *
++ * The role of the slab_mutex is to protect the list of all the slabs
++ * and to synchronize major metadata changes to slab cache structures.
++ *
++ * The slab_lock is only used for debugging and on arches that do not
++ * have the ability to do a cmpxchg_double. It only protects the second
++ * double word in the page struct. Meaning
++ * A. page->freelist -> List of free objects in a page
++ * B. page->counters -> Counters of objects
++ * C. page->frozen -> frozen state
++ *
++ * If a slab is frozen then it is exempt from list management. It is not
++ * on any list. The processor that froze the slab is the one who can
++ * perform list operations on the page. Other processors may put objects
++ * onto the freelist but the processor that froze the slab is the only
++ * one that can retrieve the objects from the page's freelist.
++ *
++ * The list_lock protects the partial and full list on each node and
++ * the partial slab counter. If taken then no new slabs may be added or
++ * removed from the lists, nor may the number of partial slabs be modified.
++ * (Note that the total number of slabs is an atomic value that may be
++ * modified without taking the list lock).
++ *
++ * The list_lock is a centralized lock and thus we avoid taking it as
++ * much as possible. As long as SLUB does not have to handle partial
++ * slabs, operations can continue without any centralized lock. F.e.
++ * allocating a long series of objects that fill up slabs does not require
++ * the list lock.
++ * Interrupts are disabled during allocation and deallocation in order to
++ * make the slab allocator safe to use in the context of an irq. In addition
++ * interrupts are disabled to ensure that the processor does not change
++ * while handling per_cpu slabs, due to kernel preemption.
++ *
++ * SLUB assigns one slab for allocation to each processor.
++ * Allocations only occur from these slabs called cpu slabs.
++ *
++ * Slabs with free elements are kept on a partial list and during regular
++ * operations no list for full slabs is used. If an object in a full slab is
++ * freed then the slab will show up again on the partial lists.
++ * We track full slabs for debugging purposes though because otherwise we
++ * cannot scan all objects.
++ *
++ * Slabs are freed when they become empty. Teardown and setup is
++ * minimal so we rely on the page allocators per cpu caches for
++ * fast frees and allocs.
++ *
++ * Overloading of page flags that are otherwise used for LRU management.
++ *
++ * PageActive The slab is frozen and exempt from list processing.
++ * This means that the slab is dedicated to a purpose
++ * such as satisfying allocations for a specific
++ * processor. Objects may be freed in the slab while
++ * it is frozen but slab_free will then skip the usual
++ * list operations. It is up to the processor holding
++ * the slab to integrate the slab into the slab lists
++ * when the slab is no longer needed.
++ *
++ * One use of this flag is to mark slabs that are
++ * used for allocations. Then such a slab becomes a cpu
++ * slab. The cpu slab may be equipped with an additional
++ * freelist that allows lockless access to
++ * free objects in addition to the regular freelist
++ * that requires the slab lock.
++ *
++ * PageError Slab requires special handling due to debug
++ * options set. This moves slab handling out of
++ * the fast path and disables lockless freelists.
++ */
++
++static inline int kmem_cache_debug(struct kmem_cache *s)
++{
++#ifdef CONFIG_SLUB_DEBUG
++ return unlikely(s->flags & SLAB_DEBUG_FLAGS);
++#else
++ return 0;
++#endif
++}
++
++static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
++{
++#ifdef CONFIG_SLUB_CPU_PARTIAL
++ return !kmem_cache_debug(s);
++#else
++ return false;
++#endif
++}
++
++/*
++ * Issues still to be resolved:
++ *
++ * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
++ *
++ * - Variable sizing of the per node arrays
++ */
++
++/* Enable to test recovery from slab corruption on boot */
++#undef SLUB_RESILIENCY_TEST
++
++/* Enable to log cmpxchg failures */
++#undef SLUB_DEBUG_CMPXCHG
++
++/*
++ * Minimum number of partial slabs. These will be left on the partial
++ * lists even if they are empty. kmem_cache_shrink may reclaim them.
++ */
++#define MIN_PARTIAL 5
++
++/*
++ * Maximum number of desirable partial slabs.
++ * The existence of more partial slabs makes kmem_cache_shrink
++ * sort the partial list by the number of objects in use.
++ */
++#define MAX_PARTIAL 10
++
++#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
++ SLAB_POISON | SLAB_STORE_USER)
++
++/*
++ * Debugging flags that require metadata to be stored in the slab. These get
++ * disabled when slub_debug=O is used and a cache's min order increases with
++ * metadata.
++ */
++#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
++
++#define OO_SHIFT 16
++#define OO_MASK ((1 << OO_SHIFT) - 1)
++#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
++
++/* Internal SLUB flags */
++#define __OBJECT_POISON 0x80000000UL /* Poison object */
++#define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */
++
++#ifdef CONFIG_SMP
++static struct notifier_block slab_notifier;
++#endif
++
++/*
++ * Tracking user of a slab.
++ */
++#define TRACK_ADDRS_COUNT 16
++struct track {
++ unsigned long addr; /* Called from address */
++#ifdef CONFIG_STACKTRACE
++ unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
++#endif
++ int cpu; /* Was running on cpu */
++ int pid; /* Pid context */
++ unsigned long when; /* When did the operation occur */
++};
++
++enum track_item { TRACK_ALLOC, TRACK_FREE };
++
++#ifdef CONFIG_SYSFS
++static int sysfs_slab_add(struct kmem_cache *);
++static int sysfs_slab_alias(struct kmem_cache *, const char *);
++static void memcg_propagate_slab_attrs(struct kmem_cache *s);
++#else
++static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
++static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
++ { return 0; }
++static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
++#endif
++
++static inline void stat(const struct kmem_cache *s, enum stat_item si)
++{
++#ifdef CONFIG_SLUB_STATS
++ /*
++ * The rmw is racy on a preemptible kernel but this is acceptable, so
++ * avoid this_cpu_add()'s irq-disable overhead.
++ */
++ raw_cpu_inc(s->cpu_slab->stat[si]);
++#endif
++}
++
++/********************************************************************
++ * Core slab cache functions
++ *******************************************************************/
++
++/* Verify that a pointer has an address that is valid within a slab page */
++static inline int check_valid_pointer(struct kmem_cache *s,
++ struct page *page, const void *object)
++{
++ void *base;
++
++ if (!object)
++ return 1;
++
++ base = page_address(page);
++ if (object < base || object >= base + page->objects * s->size ||
++ (object - base) % s->size) {
++ return 0;
++ }
++
++ return 1;
++}
++
++static inline void *get_freepointer(struct kmem_cache *s, void *object)
++{
++ return *(void **)(object + s->offset);
++}
++
++static void prefetch_freepointer(const struct kmem_cache *s, void *object)
++{
++ prefetch(object + s->offset);
++}
++
++static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
++{
++ void *p;
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
++#else
++ p = get_freepointer(s, object);
++#endif
++ return p;
++}
++
++static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
++{
++ *(void **)(object + s->offset) = fp;
++}
++
++/* Loop over all objects in a slab */
++#define for_each_object(__p, __s, __addr, __objects) \
++ for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
++ __p += (__s)->size)
++
++#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
++ for (__p = (__addr), __idx = 1; __idx <= __objects;\
++ __p += (__s)->size, __idx++)
++
++/* Determine object index from a given position */
++static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
++{
++ return (p - addr) / s->size;
++}
++
++static inline size_t slab_ksize(const struct kmem_cache *s)
++{
++#ifdef CONFIG_SLUB_DEBUG
++ /*
++ * Debugging requires use of the padding between object
++ * and whatever may come after it.
++ */
++ if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
++ return s->object_size;
++
++#endif
++ /*
++ * If we have the need to store the freelist pointer
++ * back there or track user information then we can
++ * only use the space before that information.
++ */
++ if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
++ return s->inuse;
++ /*
++ * Else we can use all the padding etc for the allocation
++ */
++ return s->size;
++}
++
++static inline int order_objects(int order, unsigned long size, int reserved)
++{
++ return ((PAGE_SIZE << order) - reserved) / size;
++}
++
++static inline struct kmem_cache_order_objects oo_make(int order,
++ unsigned long size, int reserved)
++{
++ struct kmem_cache_order_objects x = {
++ (order << OO_SHIFT) + order_objects(order, size, reserved)
++ };
++
++ return x;
++}
++
++static inline int oo_order(struct kmem_cache_order_objects x)
++{
++ return x.x >> OO_SHIFT;
++}
++
++static inline int oo_objects(struct kmem_cache_order_objects x)
++{
++ return x.x & OO_MASK;
++}
++
++/*
++ * Per slab locking using the pagelock
++ */
++static __always_inline void slab_lock(struct page *page)
++{
++ bit_spin_lock(PG_locked, &page->flags);
++}
++
++static __always_inline void slab_unlock(struct page *page)
++{
++ __bit_spin_unlock(PG_locked, &page->flags);
++}
++
++static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
++{
++ struct page tmp;
++ tmp.counters = counters_new;
++ /*
++ * page->counters can cover frozen/inuse/objects as well
++ * as page->_count. If we assign to ->counters directly
++ * we run the risk of losing updates to page->_count, so
++ * be careful and only assign to the fields we need.
++ */
++ page->frozen = tmp.frozen;
++ page->inuse = tmp.inuse;
++ page->objects = tmp.objects;
++}
++
++/* Interrupts must be disabled (for the fallback code to work right) */
++static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
++ void *freelist_old, unsigned long counters_old,
++ void *freelist_new, unsigned long counters_new,
++ const char *n)
++{
++ VM_BUG_ON(!irqs_disabled());
++#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
++ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
++ if (s->flags & __CMPXCHG_DOUBLE) {
++ if (cmpxchg_double(&page->freelist, &page->counters,
++ freelist_old, counters_old,
++ freelist_new, counters_new))
++ return true;
++ } else
++#endif
++ {
++ slab_lock(page);
++ if (page->freelist == freelist_old &&
++ page->counters == counters_old) {
++ page->freelist = freelist_new;
++ set_page_slub_counters(page, counters_new);
++ slab_unlock(page);
++ return true;
++ }
++ slab_unlock(page);
++ }
++
++ cpu_relax();
++ stat(s, CMPXCHG_DOUBLE_FAIL);
++
++#ifdef SLUB_DEBUG_CMPXCHG
++ pr_info("%s %s: cmpxchg double redo ", n, s->name);
++#endif
++
++ return false;
++}
++
++static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
++ void *freelist_old, unsigned long counters_old,
++ void *freelist_new, unsigned long counters_new,
++ const char *n)
++{
++#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
++ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
++ if (s->flags & __CMPXCHG_DOUBLE) {
++ if (cmpxchg_double(&page->freelist, &page->counters,
++ freelist_old, counters_old,
++ freelist_new, counters_new))
++ return true;
++ } else
++#endif
++ {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ slab_lock(page);
++ if (page->freelist == freelist_old &&
++ page->counters == counters_old) {
++ page->freelist = freelist_new;
++ set_page_slub_counters(page, counters_new);
++ slab_unlock(page);
++ local_irq_restore(flags);
++ return true;
++ }
++ slab_unlock(page);
++ local_irq_restore(flags);
++ }
++
++ cpu_relax();
++ stat(s, CMPXCHG_DOUBLE_FAIL);
++
++#ifdef SLUB_DEBUG_CMPXCHG
++ pr_info("%s %s: cmpxchg double redo ", n, s->name);
++#endif
++
++ return false;
++}
++
++#ifdef CONFIG_SLUB_DEBUG
++/*
++ * Determine a map of objects in use on a page.
++ *
++ * Node listlock must be held to guarantee that the page does
++ * not vanish from under us.
++ */
++static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
++{
++ void *p;
++ void *addr = page_address(page);
++
++ for (p = page->freelist; p; p = get_freepointer(s, p))
++ set_bit(slab_index(p, s, addr), map);
++}
++
++/*
++ * Debug settings:
++ */
++#ifdef CONFIG_SLUB_DEBUG_ON
++static int slub_debug = DEBUG_DEFAULT_FLAGS;
++#else
++static int slub_debug;
++#endif
++
++static char *slub_debug_slabs;
++static int disable_higher_order_debug;
++
++/*
++ * slub is about to manipulate internal object metadata. This memory lies
++ * outside the range of the allocated object, so accessing it would normally
++ * be reported by kasan as a bounds error. metadata_access_enable() is used
++ * to tell kasan that these accesses are OK.
++ */
++static inline void metadata_access_enable(void)
++{
++ kasan_disable_current();
++}
++
++static inline void metadata_access_disable(void)
++{
++ kasan_enable_current();
++}
++
++/*
++ * Object debugging
++ */
++static void print_section(char *text, u8 *addr, unsigned int length)
++{
++ metadata_access_enable();
++ print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
++ length, 1);
++ metadata_access_disable();
++}
++
++static struct track *get_track(struct kmem_cache *s, void *object,
++ enum track_item alloc)
++{
++ struct track *p;
++
++ if (s->offset)
++ p = object + s->offset + sizeof(void *);
++ else
++ p = object + s->inuse;
++
++ return p + alloc;
++}
++
++static void set_track(struct kmem_cache *s, void *object,
++ enum track_item alloc, unsigned long addr)
++{
++ struct track *p = get_track(s, object, alloc);
++
++ if (addr) {
++#ifdef CONFIG_STACKTRACE
++ struct stack_trace trace;
++ int i;
++
++ trace.nr_entries = 0;
++ trace.max_entries = TRACK_ADDRS_COUNT;
++ trace.entries = p->addrs;
++ trace.skip = 3;
++ metadata_access_enable();
++ save_stack_trace(&trace);
++ metadata_access_disable();
++
++ /* See rant in lockdep.c */
++ if (trace.nr_entries != 0 &&
++ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
++ trace.nr_entries--;
++
++ for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
++ p->addrs[i] = 0;
++#endif
++ p->addr = addr;
++ p->cpu = smp_processor_id();
++ p->pid = current->pid;
++ p->when = jiffies;
++ } else
++ memset(p, 0, sizeof(struct track));
++}
++
++static void init_tracking(struct kmem_cache *s, void *object)
++{
++ if (!(s->flags & SLAB_STORE_USER))
++ return;
++
++ set_track(s, object, TRACK_FREE, 0UL);
++ set_track(s, object, TRACK_ALLOC, 0UL);
++}
++
++static void print_track(const char *s, struct track *t)
++{
++ if (!t->addr)
++ return;
++
++ pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
++ s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
++#ifdef CONFIG_STACKTRACE
++ {
++ int i;
++ for (i = 0; i < TRACK_ADDRS_COUNT; i++)
++ if (t->addrs[i])
++ pr_err("\t%pS\n", (void *)t->addrs[i]);
++ else
++ break;
++ }
++#endif
++}
++
++static void print_tracking(struct kmem_cache *s, void *object)
++{
++ if (!(s->flags & SLAB_STORE_USER))
++ return;
++
++ print_track("Allocated", get_track(s, object, TRACK_ALLOC));
++ print_track("Freed", get_track(s, object, TRACK_FREE));
++}
++
++static void print_page_info(struct page *page)
++{
++ pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
++ page, page->objects, page->inuse, page->freelist, page->flags);
++
++}
++
++static void slab_bug(struct kmem_cache *s, char *fmt, ...)
++{
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, fmt);
++ vaf.fmt = fmt;
++ vaf.va = &args;
++ pr_err("=============================================================================\n");
++ pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
++ pr_err("-----------------------------------------------------------------------------\n\n");
++
++ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
++ va_end(args);
++}
++
++static void slab_fix(struct kmem_cache *s, char *fmt, ...)
++{
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, fmt);
++ vaf.fmt = fmt;
++ vaf.va = &args;
++ pr_err("FIX %s: %pV\n", s->name, &vaf);
++ va_end(args);
++}
++
++static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
++{
++ unsigned int off; /* Offset of last byte */
++ u8 *addr = page_address(page);
++
++ print_tracking(s, p);
++
++ print_page_info(page);
++
++ pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
++ p, p - addr, get_freepointer(s, p));
++
++ if (p > addr + 16)
++ print_section("Bytes b4 ", p - 16, 16);
++
++ print_section("Object ", p, min_t(unsigned long, s->object_size,
++ PAGE_SIZE));
++ if (s->flags & SLAB_RED_ZONE)
++ print_section("Redzone ", p + s->object_size,
++ s->inuse - s->object_size);
++
++ if (s->offset)
++ off = s->offset + sizeof(void *);
++ else
++ off = s->inuse;
++
++ if (s->flags & SLAB_STORE_USER)
++ off += 2 * sizeof(struct track);
++
++ if (off != s->size)
++ /* Beginning of the filler is the free pointer */
++ print_section("Padding ", p + off, s->size - off);
++
++ dump_stack();
++}
++
++void object_err(struct kmem_cache *s, struct page *page,
++ u8 *object, char *reason)
++{
++ slab_bug(s, "%s", reason);
++ print_trailer(s, page, object);
++}
++
++static void slab_err(struct kmem_cache *s, struct page *page,
++ const char *fmt, ...)
++{
++ va_list args;
++ char buf[100];
++
++ va_start(args, fmt);
++ vsnprintf(buf, sizeof(buf), fmt, args);
++ va_end(args);
++ slab_bug(s, "%s", buf);
++ print_page_info(page);
++ dump_stack();
++}
++
++static void init_object(struct kmem_cache *s, void *object, u8 val)
++{
++ u8 *p = object;
++
++ if (s->flags & __OBJECT_POISON) {
++ memset(p, POISON_FREE, s->object_size - 1);
++ p[s->object_size - 1] = POISON_END;
++ }
++
++ if (s->flags & SLAB_RED_ZONE)
++ memset(p + s->object_size, val, s->inuse - s->object_size);
++}
++
++static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
++ void *from, void *to)
++{
++ slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
++ memset(from, data, to - from);
++}
++
++static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
++ u8 *object, char *what,
++ u8 *start, unsigned int value, unsigned int bytes)
++{
++ u8 *fault;
++ u8 *end;
++
++ metadata_access_enable();
++ fault = memchr_inv(start, value, bytes);
++ metadata_access_disable();
++ if (!fault)
++ return 1;
++
++ end = start + bytes;
++ while (end > fault && end[-1] == value)
++ end--;
++
++ slab_bug(s, "%s overwritten", what);
++ pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
++ fault, end - 1, fault[0], value);
++ print_trailer(s, page, object);
++
++ restore_bytes(s, what, value, fault, end);
++ return 0;
++}
++
++/*
++ * Object layout:
++ *
++ * object address
++ * Bytes of the object to be managed.
++ * If the freepointer may overlay the object then the free
++ * pointer is the first word of the object.
++ *
++ * Poisoning uses 0x6b (POISON_FREE) and the last byte is
++ * 0xa5 (POISON_END)
++ *
++ * object + s->object_size
++ * Padding to reach word boundary. This is also used for Redzoning.
++ * Padding is extended by another word if Redzoning is enabled and
++ * object_size == inuse.
++ *
++ * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
++ * 0xcc (RED_ACTIVE) for objects in use.
++ *
++ * object + s->inuse
++ * Meta data starts here.
++ *
++ * A. Free pointer (if we cannot overwrite object on free)
++ * B. Tracking data for SLAB_STORE_USER
++ * C. Padding to reach required alignment boundary or at minimum
++ * one word if debugging is on to be able to detect writes
++ * before the word boundary.
++ *
++ * Padding is done using 0x5a (POISON_INUSE)
++ *
++ * object + s->size
++ * Nothing is used beyond s->size.
++ *
++ * If slabcaches are merged then the object_size and inuse boundaries are mostly
++ * ignored. And therefore no slab options that rely on these boundaries
++ * may be used with merged slabcaches.
++ */
++
++static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
++{
++ unsigned long off = s->inuse; /* The end of info */
++
++ if (s->offset)
++ /* Freepointer is placed after the object. */
++ off += sizeof(void *);
++
++ if (s->flags & SLAB_STORE_USER)
++ /* We also have user information there */
++ off += 2 * sizeof(struct track);
++
++ if (s->size == off)
++ return 1;
++
++ return check_bytes_and_report(s, page, p, "Object padding",
++ p + off, POISON_INUSE, s->size - off);
++}
++
++/* Check the pad bytes at the end of a slab page */
++static int slab_pad_check(struct kmem_cache *s, struct page *page)
++{
++ u8 *start;
++ u8 *fault;
++ u8 *end;
++ int length;
++ int remainder;
++
++ if (!(s->flags & SLAB_POISON))
++ return 1;
++
++ start = page_address(page);
++ length = (PAGE_SIZE << compound_order(page)) - s->reserved;
++ end = start + length;
++ remainder = length % s->size;
++ if (!remainder)
++ return 1;
++
++ metadata_access_enable();
++ fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
++ metadata_access_disable();
++ if (!fault)
++ return 1;
++ while (end > fault && end[-1] == POISON_INUSE)
++ end--;
++
++ slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
++ print_section("Padding ", end - remainder, remainder);
++
++ restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
++ return 0;
++}
++
++static int check_object(struct kmem_cache *s, struct page *page,
++ void *object, u8 val)
++{
++ u8 *p = object;
++ u8 *endobject = object + s->object_size;
++
++ if (s->flags & SLAB_RED_ZONE) {
++ if (!check_bytes_and_report(s, page, object, "Redzone",
++ endobject, val, s->inuse - s->object_size))
++ return 0;
++ } else {
++ if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
++ check_bytes_and_report(s, page, p, "Alignment padding",
++ endobject, POISON_INUSE,
++ s->inuse - s->object_size);
++ }
++ }
++
++ if (s->flags & SLAB_POISON) {
++ if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
++ (!check_bytes_and_report(s, page, p, "Poison", p,
++ POISON_FREE, s->object_size - 1) ||
++ !check_bytes_and_report(s, page, p, "Poison",
++ p + s->object_size - 1, POISON_END, 1)))
++ return 0;
++ /*
++ * check_pad_bytes cleans up on its own.
++ */
++ check_pad_bytes(s, page, p);
++ }
++
++ if (!s->offset && val == SLUB_RED_ACTIVE)
++ /*
++ * Object and freepointer overlap. Cannot check
++ * freepointer while object is allocated.
++ */
++ return 1;
++
++ /* Check free pointer validity */
++ if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
++ object_err(s, page, p, "Freepointer corrupt");
++ /*
++ * No choice but to zap it and thus lose the remainder
++ * of the free objects in this slab. May cause
++ * another error because the object count is now wrong.
++ */
++ set_freepointer(s, p, NULL);
++ return 0;
++ }
++ return 1;
++}
++
++static int check_slab(struct kmem_cache *s, struct page *page)
++{
++ int maxobj;
++
++ VM_BUG_ON(!irqs_disabled());
++
++ if (!PageSlab(page)) {
++ slab_err(s, page, "Not a valid slab page");
++ return 0;
++ }
++
++ maxobj = order_objects(compound_order(page), s->size, s->reserved);
++ if (page->objects > maxobj) {
++ slab_err(s, page, "objects %u > max %u",
++ page->objects, maxobj);
++ return 0;
++ }
++ if (page->inuse > page->objects) {
++ slab_err(s, page, "inuse %u > max %u",
++ page->inuse, page->objects);
++ return 0;
++ }
++ /* Slab_pad_check fixes things up after itself */
++ slab_pad_check(s, page);
++ return 1;
++}
++
++/*
++ * Determine if a certain object on a page is on the freelist. Must hold the
++ * slab lock to guarantee that the chains are in a consistent state.
++ */
++static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
++{
++ int nr = 0;
++ void *fp;
++ void *object = NULL;
++ int max_objects;
++
++ fp = page->freelist;
++ while (fp && nr <= page->objects) {
++ if (fp == search)
++ return 1;
++ if (!check_valid_pointer(s, page, fp)) {
++ if (object) {
++ object_err(s, page, object,
++ "Freechain corrupt");
++ set_freepointer(s, object, NULL);
++ } else {
++ slab_err(s, page, "Freepointer corrupt");
++ page->freelist = NULL;
++ page->inuse = page->objects;
++ slab_fix(s, "Freelist cleared");
++ return 0;
++ }
++ break;
++ }
++ object = fp;
++ fp = get_freepointer(s, object);
++ nr++;
++ }
++
++ max_objects = order_objects(compound_order(page), s->size, s->reserved);
++ if (max_objects > MAX_OBJS_PER_PAGE)
++ max_objects = MAX_OBJS_PER_PAGE;
++
++ if (page->objects != max_objects) {
++ slab_err(s, page, "Wrong number of objects. Found %d but "
++ "should be %d", page->objects, max_objects);
++ page->objects = max_objects;
++ slab_fix(s, "Number of objects adjusted.");
++ }
++ if (page->inuse != page->objects - nr) {
++ slab_err(s, page, "Wrong object count. Counter is %d but "
++ "counted were %d", page->inuse, page->objects - nr);
++ page->inuse = page->objects - nr;
++ slab_fix(s, "Object count adjusted.");
++ }
++ return search == NULL;
++}
++
++static void trace(struct kmem_cache *s, struct page *page, void *object,
++ int alloc)
++{
++ if (s->flags & SLAB_TRACE) {
++ pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
++ s->name,
++ alloc ? "alloc" : "free",
++ object, page->inuse,
++ page->freelist);
++
++ if (!alloc)
++ print_section("Object ", (void *)object,
++ s->object_size);
++
++ dump_stack();
++ }
++}
++
++/*
++ * Tracking of fully allocated slabs for debugging purposes.
++ */
++static void add_full(struct kmem_cache *s,
++ struct kmem_cache_node *n, struct page *page)
++{
++ if (!(s->flags & SLAB_STORE_USER))
++ return;
++
++ lockdep_assert_held(&n->list_lock);
++ list_add(&page->lru, &n->full);
++}
++
++static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
++{
++ if (!(s->flags & SLAB_STORE_USER))
++ return;
++
++ lockdep_assert_held(&n->list_lock);
++ list_del(&page->lru);
++}
++
++/* Tracking of the number of slabs for debugging purposes */
++static inline unsigned long slabs_node(struct kmem_cache *s, int node)
++{
++ struct kmem_cache_node *n = get_node(s, node);
++
++ return atomic_long_read(&n->nr_slabs);
++}
++
++static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
++{
++ return atomic_long_read(&n->nr_slabs);
++}
++
++static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
++{
++ struct kmem_cache_node *n = get_node(s, node);
++
++ /*
++ * May be called early in order to allocate a slab for the
++ * kmem_cache_node structure. Solve the chicken-egg
++ * dilemma by deferring the increment of the count during
++ * bootstrap (see early_kmem_cache_node_alloc).
++ */
++ if (likely(n)) {
++ atomic_long_inc(&n->nr_slabs);
++ atomic_long_add(objects, &n->total_objects);
++ }
++}
++static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
++{
++ struct kmem_cache_node *n = get_node(s, node);
++
++ atomic_long_dec(&n->nr_slabs);
++ atomic_long_sub(objects, &n->total_objects);
++}
++
++/* Object debug checks for alloc/free paths */
++static void setup_object_debug(struct kmem_cache *s, struct page *page,
++ void *object)
++{
++ if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
++ return;
++
++ init_object(s, object, SLUB_RED_INACTIVE);
++ init_tracking(s, object);
++}
++
++static noinline int alloc_debug_processing(struct kmem_cache *s,
++ struct page *page,
++ void *object, unsigned long addr)
++{
++ if (!check_slab(s, page))
++ goto bad;
++
++ if (!check_valid_pointer(s, page, object)) {
++ object_err(s, page, object, "Freelist Pointer check fails");
++ goto bad;
++ }
++
++ if (!check_object(s, page, object, SLUB_RED_INACTIVE))
++ goto bad;
++
++	/* Success. Perform special debug activities for allocs */
++ if (s->flags & SLAB_STORE_USER)
++ set_track(s, object, TRACK_ALLOC, addr);
++ trace(s, page, object, 1);
++ init_object(s, object, SLUB_RED_ACTIVE);
++ return 1;
++
++bad:
++ if (PageSlab(page)) {
++ /*
++		 * If this is a slab page then let's do the best we can
++ * to avoid issues in the future. Marking all objects
++ * as used avoids touching the remaining objects.
++ */
++ slab_fix(s, "Marking all objects used");
++ page->inuse = page->objects;
++ page->freelist = NULL;
++ }
++ return 0;
++}
++
++static noinline struct kmem_cache_node *free_debug_processing(
++ struct kmem_cache *s, struct page *page, void *object,
++ unsigned long addr, unsigned long *flags)
++{
++ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
++
++ spin_lock_irqsave(&n->list_lock, *flags);
++ slab_lock(page);
++
++ if (!check_slab(s, page))
++ goto fail;
++
++ if (!check_valid_pointer(s, page, object)) {
++ slab_err(s, page, "Invalid object pointer 0x%p", object);
++ goto fail;
++ }
++
++ if (on_freelist(s, page, object)) {
++ object_err(s, page, object, "Object already free");
++ goto fail;
++ }
++
++ if (!check_object(s, page, object, SLUB_RED_ACTIVE))
++ goto out;
++
++ if (unlikely(s != page->slab_cache)) {
++ if (!PageSlab(page)) {
++ slab_err(s, page, "Attempt to free object(0x%p) "
++ "outside of slab", object);
++ } else if (!page->slab_cache) {
++ pr_err("SLUB <none>: no slab for object 0x%p.\n",
++ object);
++ dump_stack();
++ } else
++ object_err(s, page, object,
++ "page slab pointer corrupt.");
++ goto fail;
++ }
++
++ if (s->flags & SLAB_STORE_USER)
++ set_track(s, object, TRACK_FREE, addr);
++ trace(s, page, object, 0);
++ init_object(s, object, SLUB_RED_INACTIVE);
++out:
++ slab_unlock(page);
++ /*
++ * Keep node_lock to preserve integrity
++ * until the object is actually freed
++ */
++ return n;
++
++fail:
++ slab_unlock(page);
++ spin_unlock_irqrestore(&n->list_lock, *flags);
++ slab_fix(s, "Object at 0x%p not freed", object);
++ return NULL;
++}
++
++static int __init setup_slub_debug(char *str)
++{
++ slub_debug = DEBUG_DEFAULT_FLAGS;
++ if (*str++ != '=' || !*str)
++ /*
++ * No options specified. Switch on full debugging.
++ */
++ goto out;
++
++ if (*str == ',')
++ /*
++ * No options but restriction on slabs. This means full
++ * debugging for slabs matching a pattern.
++ */
++ goto check_slabs;
++
++ slub_debug = 0;
++ if (*str == '-')
++ /*
++ * Switch off all debugging measures.
++ */
++ goto out;
++
++ /*
++ * Determine which debug features should be switched on
++ */
++ for (; *str && *str != ','; str++) {
++ switch (tolower(*str)) {
++ case 'f':
++ slub_debug |= SLAB_DEBUG_FREE;
++ break;
++ case 'z':
++ slub_debug |= SLAB_RED_ZONE;
++ break;
++ case 'p':
++ slub_debug |= SLAB_POISON;
++ break;
++ case 'u':
++ slub_debug |= SLAB_STORE_USER;
++ break;
++ case 't':
++ slub_debug |= SLAB_TRACE;
++ break;
++ case 'a':
++ slub_debug |= SLAB_FAILSLAB;
++ break;
++ case 'o':
++ /*
++			 * Avoid enabling debugging on caches if their minimum
++ * order would increase as a result.
++ */
++ disable_higher_order_debug = 1;
++ break;
++ default:
++ pr_err("slub_debug option '%c' unknown. skipped\n",
++ *str);
++ }
++ }
++
++check_slabs:
++ if (*str == ',')
++ slub_debug_slabs = str + 1;
++out:
++ return 1;
++}
++
++__setup("slub_debug", setup_slub_debug);
++
++unsigned long kmem_cache_flags(unsigned long object_size,
++ unsigned long flags, const char *name,
++ void (*ctor)(void *))
++{
++ /*
++ * Enable debugging if selected on the kernel commandline.
++ */
++ if (slub_debug && (!slub_debug_slabs || (name &&
++ !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
++ flags |= slub_debug;
++
++ return flags;
++}
++#else
++static inline void setup_object_debug(struct kmem_cache *s,
++ struct page *page, void *object) {}
++
++static inline int alloc_debug_processing(struct kmem_cache *s,
++ struct page *page, void *object, unsigned long addr) { return 0; }
++
++static inline struct kmem_cache_node *free_debug_processing(
++ struct kmem_cache *s, struct page *page, void *object,
++ unsigned long addr, unsigned long *flags) { return NULL; }
++
++static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
++ { return 1; }
++static inline int check_object(struct kmem_cache *s, struct page *page,
++ void *object, u8 val) { return 1; }
++static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
++ struct page *page) {}
++static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
++ struct page *page) {}
++unsigned long kmem_cache_flags(unsigned long object_size,
++ unsigned long flags, const char *name,
++ void (*ctor)(void *))
++{
++ return flags;
++}
++#define slub_debug 0
++
++#define disable_higher_order_debug 0
++
++static inline unsigned long slabs_node(struct kmem_cache *s, int node)
++ { return 0; }
++static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
++ { return 0; }
++static inline void inc_slabs_node(struct kmem_cache *s, int node,
++ int objects) {}
++static inline void dec_slabs_node(struct kmem_cache *s, int node,
++ int objects) {}
++
++#endif /* CONFIG_SLUB_DEBUG */
++
++/*
++ * Hooks for other subsystems that check memory allocations. In a typical
++ * production configuration these hooks all should produce no code at all.
++ */
++static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
++{
++ kmemleak_alloc(ptr, size, 1, flags);
++ kasan_kmalloc_large(ptr, size);
++}
++
++static inline void kfree_hook(const void *x)
++{
++ kmemleak_free(x);
++ kasan_kfree_large(x);
++}
++
++static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
++ gfp_t flags)
++{
++ flags &= gfp_allowed_mask;
++ lockdep_trace_alloc(flags);
++ might_sleep_if(flags & __GFP_WAIT);
++
++ if (should_failslab(s->object_size, flags, s->flags))
++ return NULL;
++
++ return memcg_kmem_get_cache(s, flags);
++}
++
++static inline void slab_post_alloc_hook(struct kmem_cache *s,
++ gfp_t flags, void *object)
++{
++ flags &= gfp_allowed_mask;
++ kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
++ kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
++ memcg_kmem_put_cache(s);
++ kasan_slab_alloc(s, object);
++}
++
++static inline void slab_free_hook(struct kmem_cache *s, void *x)
++{
++ kmemleak_free_recursive(x, s->flags);
++
++ /*
++	 * Trouble is that we may no longer disable interrupts in the fast path.
++ * So in order to make the debug calls that expect irqs to be
++ * disabled we need to disable interrupts temporarily.
++ */
++#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
++ {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ kmemcheck_slab_free(s, x, s->object_size);
++ debug_check_no_locks_freed(x, s->object_size);
++ local_irq_restore(flags);
++ }
++#endif
++ if (!(s->flags & SLAB_DEBUG_OBJECTS))
++ debug_check_no_obj_freed(x, s->object_size);
++
++ kasan_slab_free(s, x);
++}
++
++/*
++ * Slab allocation and freeing
++ */
++static inline struct page *alloc_slab_page(struct kmem_cache *s,
++ gfp_t flags, int node, struct kmem_cache_order_objects oo)
++{
++ struct page *page;
++ int order = oo_order(oo);
++
++ flags |= __GFP_NOTRACK;
++
++ if (memcg_charge_slab(s, flags, order))
++ return NULL;
++
++ if (node == NUMA_NO_NODE)
++ page = alloc_pages(flags, order);
++ else
++ page = alloc_pages_exact_node(node, flags, order);
++
++ if (!page)
++ memcg_uncharge_slab(s, order);
++
++ return page;
++}
++
++static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
++{
++ struct page *page;
++ struct kmem_cache_order_objects oo = s->oo;
++ gfp_t alloc_gfp;
++
++ flags &= gfp_allowed_mask;
++
++ if (flags & __GFP_WAIT)
++ local_irq_enable();
++
++ flags |= s->allocflags;
++
++ /*
++ * Let the initial higher-order allocation fail under memory pressure
++	 * so we fall back to the minimum order allocation.
++ */
++ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
++
++ page = alloc_slab_page(s, alloc_gfp, node, oo);
++ if (unlikely(!page)) {
++ oo = s->min;
++ alloc_gfp = flags;
++ /*
++ * Allocation may have failed due to fragmentation.
++ * Try a lower order alloc if possible
++ */
++ page = alloc_slab_page(s, alloc_gfp, node, oo);
++
++ if (page)
++ stat(s, ORDER_FALLBACK);
++ }
++
++ if (kmemcheck_enabled && page
++ && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
++ int pages = 1 << oo_order(oo);
++
++ kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
++
++ /*
++ * Objects from caches that have a constructor don't get
++ * cleared when they're allocated, so we need to do it here.
++ */
++ if (s->ctor)
++ kmemcheck_mark_uninitialized_pages(page, pages);
++ else
++ kmemcheck_mark_unallocated_pages(page, pages);
++ }
++
++ if (flags & __GFP_WAIT)
++ local_irq_disable();
++ if (!page)
++ return NULL;
++
++ page->objects = oo_objects(oo);
++ mod_zone_page_state(page_zone(page),
++ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
++ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
++ 1 << oo_order(oo));
++
++ return page;
++}
++
++static void setup_object(struct kmem_cache *s, struct page *page,
++ void *object)
++{
++ setup_object_debug(s, page, object);
++ if (unlikely(s->ctor)) {
++ kasan_unpoison_object_data(s, object);
++ s->ctor(object);
++ kasan_poison_object_data(s, object);
++ }
++}
++
++static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
++{
++ struct page *page;
++ void *start;
++ void *p;
++ int order;
++ int idx;
++
++ if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
++ pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
++ BUG();
++ }
++
++ page = allocate_slab(s,
++ flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
++ if (!page)
++ goto out;
++
++ order = compound_order(page);
++ inc_slabs_node(s, page_to_nid(page), page->objects);
++ page->slab_cache = s;
++ __SetPageSlab(page);
++ if (page_is_pfmemalloc(page))
++ SetPageSlabPfmemalloc(page);
++
++ start = page_address(page);
++
++ if (unlikely(s->flags & SLAB_POISON))
++ memset(start, POISON_INUSE, PAGE_SIZE << order);
++
++ kasan_poison_slab(page);
++
++ for_each_object_idx(p, idx, s, start, page->objects) {
++ setup_object(s, page, p);
++ if (likely(idx < page->objects))
++ set_freepointer(s, p, p + s->size);
++ else
++ set_freepointer(s, p, NULL);
++ }
++
++ page->freelist = start;
++ page->inuse = page->objects;
++ page->frozen = 1;
++out:
++ return page;
++}
++
++static void __free_slab(struct kmem_cache *s, struct page *page)
++{
++ int order = compound_order(page);
++ int pages = 1 << order;
++
++ if (kmem_cache_debug(s)) {
++ void *p;
++
++ slab_pad_check(s, page);
++ for_each_object(p, s, page_address(page),
++ page->objects)
++ check_object(s, page, p, SLUB_RED_INACTIVE);
++ }
++
++ kmemcheck_free_shadow(page, compound_order(page));
++
++ mod_zone_page_state(page_zone(page),
++ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
++ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
++ -pages);
++
++ __ClearPageSlabPfmemalloc(page);
++ __ClearPageSlab(page);
++
++ page_mapcount_reset(page);
++ if (current->reclaim_state)
++ current->reclaim_state->reclaimed_slab += pages;
++ __free_pages(page, order);
++ memcg_uncharge_slab(s, order);
++}
++
++#define need_reserve_slab_rcu \
++ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
++
++static void rcu_free_slab(struct rcu_head *h)
++{
++ struct page *page;
++
++ if (need_reserve_slab_rcu)
++ page = virt_to_head_page(h);
++ else
++ page = container_of((struct list_head *)h, struct page, lru);
++
++ __free_slab(page->slab_cache, page);
++}
++
++static void free_slab(struct kmem_cache *s, struct page *page)
++{
++ if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
++ struct rcu_head *head;
++
++ if (need_reserve_slab_rcu) {
++ int order = compound_order(page);
++ int offset = (PAGE_SIZE << order) - s->reserved;
++
++ VM_BUG_ON(s->reserved != sizeof(*head));
++ head = page_address(page) + offset;
++ } else {
++ /*
++ * RCU free overloads the RCU head over the LRU
++ */
++ head = (void *)&page->lru;
++ }
++
++ call_rcu(head, rcu_free_slab);
++ } else
++ __free_slab(s, page);
++}
++
++static void discard_slab(struct kmem_cache *s, struct page *page)
++{
++ dec_slabs_node(s, page_to_nid(page), page->objects);
++ free_slab(s, page);
++}
++
++/*
++ * Management of partially allocated slabs.
++ */
++static inline void
++__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
++{
++ n->nr_partial++;
++ if (tail == DEACTIVATE_TO_TAIL)
++ list_add_tail(&page->lru, &n->partial);
++ else
++ list_add(&page->lru, &n->partial);
++}
++
++static inline void add_partial(struct kmem_cache_node *n,
++ struct page *page, int tail)
++{
++ lockdep_assert_held(&n->list_lock);
++ __add_partial(n, page, tail);
++}
++
++static inline void
++__remove_partial(struct kmem_cache_node *n, struct page *page)
++{
++ list_del(&page->lru);
++ n->nr_partial--;
++}
++
++static inline void remove_partial(struct kmem_cache_node *n,
++ struct page *page)
++{
++ lockdep_assert_held(&n->list_lock);
++ __remove_partial(n, page);
++}
++
++/*
++ * Remove slab from the partial list, freeze it and
++ * return the pointer to the freelist.
++ *
++ * Returns a list of objects or NULL if it fails.
++ */
++static inline void *acquire_slab(struct kmem_cache *s,
++ struct kmem_cache_node *n, struct page *page,
++ int mode, int *objects)
++{
++ void *freelist;
++ unsigned long counters;
++ struct page new;
++
++ lockdep_assert_held(&n->list_lock);
++
++ /*
++ * Zap the freelist and set the frozen bit.
++ * The old freelist is the list of objects for the
++ * per cpu allocation list.
++ */
++ freelist = page->freelist;
++ counters = page->counters;
++ new.counters = counters;
++ *objects = new.objects - new.inuse;
++ if (mode) {
++ new.inuse = page->objects;
++ new.freelist = NULL;
++ } else {
++ new.freelist = freelist;
++ }
++
++ VM_BUG_ON(new.frozen);
++ new.frozen = 1;
++
++ if (!__cmpxchg_double_slab(s, page,
++ freelist, counters,
++ new.freelist, new.counters,
++ "acquire_slab"))
++ return NULL;
++
++ remove_partial(n, page);
++ WARN_ON(!freelist);
++ return freelist;
++}
++
++static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
++static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
++
++/*
++ * Try to allocate a partial slab from a specific node.
++ */
++static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
++ struct kmem_cache_cpu *c, gfp_t flags)
++{
++ struct page *page, *page2;
++ void *object = NULL;
++ int available = 0;
++ int objects;
++
++ /*
++ * Racy check. If we mistakenly see no partial slabs then we
++ * just allocate an empty slab. If we mistakenly try to get a
++	 * partial slab and there is none available then get_partial_node()
++ * will return NULL.
++ */
++ if (!n || !n->nr_partial)
++ return NULL;
++
++ spin_lock(&n->list_lock);
++ list_for_each_entry_safe(page, page2, &n->partial, lru) {
++ void *t;
++
++ if (!pfmemalloc_match(page, flags))
++ continue;
++
++ t = acquire_slab(s, n, page, object == NULL, &objects);
++ if (!t)
++ break;
++
++ available += objects;
++ if (!object) {
++ c->page = page;
++ stat(s, ALLOC_FROM_PARTIAL);
++ object = t;
++ } else {
++ put_cpu_partial(s, page, 0);
++ stat(s, CPU_PARTIAL_NODE);
++ }
++ if (!kmem_cache_has_cpu_partial(s)
++ || available > s->cpu_partial / 2)
++ break;
++
++ }
++ spin_unlock(&n->list_lock);
++ return object;
++}
++
++/*
++ * Get a page from somewhere. Search in increasing NUMA distances.
++ */
++static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
++ struct kmem_cache_cpu *c)
++{
++#ifdef CONFIG_NUMA
++ struct zonelist *zonelist;
++ struct zoneref *z;
++ struct zone *zone;
++ enum zone_type high_zoneidx = gfp_zone(flags);
++ void *object;
++ unsigned int cpuset_mems_cookie;
++
++ /*
++ * The defrag ratio allows a configuration of the tradeoffs between
++ * inter node defragmentation and node local allocations. A lower
++ * defrag_ratio increases the tendency to do local allocations
++ * instead of attempting to obtain partial slabs from other nodes.
++ *
++ * If the defrag_ratio is set to 0 then kmalloc() always
++ * returns node local objects. If the ratio is higher then kmalloc()
++ * may return off node objects because partial slabs are obtained
++ * from other nodes and filled up.
++ *
++ * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
++ * defrag_ratio = 1000) then every (well almost) allocation will
++ * first attempt to defrag slab caches on other nodes. This means
++ * scanning over all nodes to look for partial slabs which may be
++ * expensive if we do it every time we are trying to find a slab
++ * with available objects.
++ */
++ if (!s->remote_node_defrag_ratio ||
++ get_cycles() % 1024 > s->remote_node_defrag_ratio)
++ return NULL;
++
++ do {
++ cpuset_mems_cookie = read_mems_allowed_begin();
++ zonelist = node_zonelist(mempolicy_slab_node(), flags);
++ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
++ struct kmem_cache_node *n;
++
++ n = get_node(s, zone_to_nid(zone));
++
++ if (n && cpuset_zone_allowed(zone, flags) &&
++ n->nr_partial > s->min_partial) {
++ object = get_partial_node(s, n, c, flags);
++ if (object) {
++ /*
++ * Don't check read_mems_allowed_retry()
++ * here - if mems_allowed was updated in
++ * parallel, that was a harmless race
++ * between allocation and the cpuset
++ * update
++ */
++ return object;
++ }
++ }
++ }
++ } while (read_mems_allowed_retry(cpuset_mems_cookie));
++#endif
++ return NULL;
++}
++
++/*
++ * Get a partial page, lock it and return it.
++ */
++static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
++ struct kmem_cache_cpu *c)
++{
++ void *object;
++ int searchnode = node;
++
++ if (node == NUMA_NO_NODE)
++ searchnode = numa_mem_id();
++ else if (!node_present_pages(node))
++ searchnode = node_to_mem_node(node);
++
++ object = get_partial_node(s, get_node(s, searchnode), c, flags);
++ if (object || node != NUMA_NO_NODE)
++ return object;
++
++ return get_any_partial(s, flags, c);
++}
++
++#ifdef CONFIG_PREEMPT
++/*
++ * Calculate the next globally unique transaction for disambiguation
++ * during cmpxchg. The transactions start with the cpu number and are then
++ * incremented by CONFIG_NR_CPUS.
++ */
++#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
++#else
++/*
++ * No preemption supported, therefore also no need to check for
++ * different cpus.
++ */
++#define TID_STEP 1
++#endif
++
++static inline unsigned long next_tid(unsigned long tid)
++{
++ return tid + TID_STEP;
++}
++
++static inline unsigned int tid_to_cpu(unsigned long tid)
++{
++ return tid % TID_STEP;
++}
++
++static inline unsigned long tid_to_event(unsigned long tid)
++{
++ return tid / TID_STEP;
++}
++
++static inline unsigned int init_tid(int cpu)
++{
++ return cpu;
++}
++
++static inline void note_cmpxchg_failure(const char *n,
++ const struct kmem_cache *s, unsigned long tid)
++{
++#ifdef SLUB_DEBUG_CMPXCHG
++ unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
++
++ pr_info("%s %s: cmpxchg redo ", n, s->name);
++
++#ifdef CONFIG_PREEMPT
++ if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
++ pr_warn("due to cpu change %d -> %d\n",
++ tid_to_cpu(tid), tid_to_cpu(actual_tid));
++ else
++#endif
++ if (tid_to_event(tid) != tid_to_event(actual_tid))
++ pr_warn("due to cpu running other code. Event %ld->%ld\n",
++ tid_to_event(tid), tid_to_event(actual_tid));
++ else
++ pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
++ actual_tid, tid, next_tid(tid));
++#endif
++ stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
++}
++
++static void init_kmem_cache_cpus(struct kmem_cache *s)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
++}
++
++/*
++ * Remove the cpu slab
++ */
++static void deactivate_slab(struct kmem_cache *s, struct page *page,
++ void *freelist)
++{
++ enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
++ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
++ int lock = 0;
++ enum slab_modes l = M_NONE, m = M_NONE;
++ void *nextfree;
++ int tail = DEACTIVATE_TO_HEAD;
++ struct page new;
++ struct page old;
++
++ if (page->freelist) {
++ stat(s, DEACTIVATE_REMOTE_FREES);
++ tail = DEACTIVATE_TO_TAIL;
++ }
++
++ /*
++ * Stage one: Free all available per cpu objects back
++ * to the page freelist while it is still frozen. Leave the
++ * last one.
++ *
++ * There is no need to take the list->lock because the page
++ * is still frozen.
++ */
++ while (freelist && (nextfree = get_freepointer(s, freelist))) {
++ void *prior;
++ unsigned long counters;
++
++ do {
++ prior = page->freelist;
++ counters = page->counters;
++ set_freepointer(s, freelist, prior);
++ new.counters = counters;
++ new.inuse--;
++ VM_BUG_ON(!new.frozen);
++
++ } while (!__cmpxchg_double_slab(s, page,
++ prior, counters,
++ freelist, new.counters,
++ "drain percpu freelist"));
++
++ freelist = nextfree;
++ }
++
++ /*
++ * Stage two: Ensure that the page is unfrozen while the
++ * list presence reflects the actual number of objects
++ * during unfreeze.
++ *
++ * We setup the list membership and then perform a cmpxchg
++ * with the count. If there is a mismatch then the page
++ * is not unfrozen but the page is on the wrong list.
++ *
++ * Then we restart the process which may have to remove
++ * the page from the list that we just put it on again
++ * because the number of objects in the slab may have
++ * changed.
++ */
++redo:
++
++ old.freelist = page->freelist;
++ old.counters = page->counters;
++ VM_BUG_ON(!old.frozen);
++
++ /* Determine target state of the slab */
++ new.counters = old.counters;
++ if (freelist) {
++ new.inuse--;
++ set_freepointer(s, freelist, old.freelist);
++ new.freelist = freelist;
++ } else
++ new.freelist = old.freelist;
++
++ new.frozen = 0;
++
++ if (!new.inuse && n->nr_partial >= s->min_partial)
++ m = M_FREE;
++ else if (new.freelist) {
++ m = M_PARTIAL;
++ if (!lock) {
++ lock = 1;
++ /*
++			 * Taking the spinlock removes the possibility
++ * that acquire_slab() will see a slab page that
++ * is frozen
++ */
++ spin_lock(&n->list_lock);
++ }
++ } else {
++ m = M_FULL;
++ if (kmem_cache_debug(s) && !lock) {
++ lock = 1;
++ /*
++ * This also ensures that the scanning of full
++ * slabs from diagnostic functions will not see
++ * any frozen slabs.
++ */
++ spin_lock(&n->list_lock);
++ }
++ }
++
++ if (l != m) {
++
++ if (l == M_PARTIAL)
++
++ remove_partial(n, page);
++
++ else if (l == M_FULL)
++
++ remove_full(s, n, page);
++
++ if (m == M_PARTIAL) {
++
++ add_partial(n, page, tail);
++ stat(s, tail);
++
++ } else if (m == M_FULL) {
++
++ stat(s, DEACTIVATE_FULL);
++ add_full(s, n, page);
++
++ }
++ }
++
++ l = m;
++ if (!__cmpxchg_double_slab(s, page,
++ old.freelist, old.counters,
++ new.freelist, new.counters,
++ "unfreezing slab"))
++ goto redo;
++
++ if (lock)
++ spin_unlock(&n->list_lock);
++
++ if (m == M_FREE) {
++ stat(s, DEACTIVATE_EMPTY);
++ discard_slab(s, page);
++ stat(s, FREE_SLAB);
++ }
++}
++
++/*
++ * Unfreeze all the cpu partial slabs.
++ *
++ * This function must be called with interrupts disabled
++ * for the cpu using c (or some other guarantee must be there
++ * to guarantee no concurrent accesses).
++ */
++static void unfreeze_partials(struct kmem_cache *s,
++ struct kmem_cache_cpu *c)
++{
++#ifdef CONFIG_SLUB_CPU_PARTIAL
++ struct kmem_cache_node *n = NULL, *n2 = NULL;
++ struct page *page, *discard_page = NULL;
++
++ while ((page = c->partial)) {
++ struct page new;
++ struct page old;
++
++ c->partial = page->next;
++
++ n2 = get_node(s, page_to_nid(page));
++ if (n != n2) {
++ if (n)
++ spin_unlock(&n->list_lock);
++
++ n = n2;
++ spin_lock(&n->list_lock);
++ }
++
++ do {
++
++ old.freelist = page->freelist;
++ old.counters = page->counters;
++ VM_BUG_ON(!old.frozen);
++
++ new.counters = old.counters;
++ new.freelist = old.freelist;
++
++ new.frozen = 0;
++
++ } while (!__cmpxchg_double_slab(s, page,
++ old.freelist, old.counters,
++ new.freelist, new.counters,
++ "unfreezing slab"));
++
++ if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
++ page->next = discard_page;
++ discard_page = page;
++ } else {
++ add_partial(n, page, DEACTIVATE_TO_TAIL);
++ stat(s, FREE_ADD_PARTIAL);
++ }
++ }
++
++ if (n)
++ spin_unlock(&n->list_lock);
++
++ while (discard_page) {
++ page = discard_page;
++ discard_page = discard_page->next;
++
++ stat(s, DEACTIVATE_EMPTY);
++ discard_slab(s, page);
++ stat(s, FREE_SLAB);
++ }
++#endif
++}
++
++/*
++ * Put a page that was just frozen (in __slab_free) into a partial page
++ * slot if available. This is done without interrupts disabled and without
++ * preemption disabled. The cmpxchg is racy and may put the partial page
++ * onto a random cpu's partial slot.
++ *
++ * If we did not find a slot then simply move all the partials to the
++ * per node partial list.
++ */
++static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
++{
++#ifdef CONFIG_SLUB_CPU_PARTIAL
++ struct page *oldpage;
++ int pages;
++ int pobjects;
++
++ preempt_disable();
++ do {
++ pages = 0;
++ pobjects = 0;
++ oldpage = this_cpu_read(s->cpu_slab->partial);
++
++ if (oldpage) {
++ pobjects = oldpage->pobjects;
++ pages = oldpage->pages;
++ if (drain && pobjects > s->cpu_partial) {
++ unsigned long flags;
++ /*
++ * partial array is full. Move the existing
++ * set to the per node partial list.
++ */
++ local_irq_save(flags);
++ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ local_irq_restore(flags);
++ oldpage = NULL;
++ pobjects = 0;
++ pages = 0;
++ stat(s, CPU_PARTIAL_DRAIN);
++ }
++ }
++
++ pages++;
++ pobjects += page->objects - page->inuse;
++
++ page->pages = pages;
++ page->pobjects = pobjects;
++ page->next = oldpage;
++
++ } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
++ != oldpage);
++ if (unlikely(!s->cpu_partial)) {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ local_irq_restore(flags);
++ }
++ preempt_enable();
++#endif
++}
++
++static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
++{
++ stat(s, CPUSLAB_FLUSH);
++ deactivate_slab(s, c->page, c->freelist);
++
++ c->tid = next_tid(c->tid);
++ c->page = NULL;
++ c->freelist = NULL;
++}
++
++/*
++ * Flush cpu slab.
++ *
++ * Called from IPI handler with interrupts disabled.
++ */
++static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
++{
++ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
++
++ if (likely(c)) {
++ if (c->page)
++ flush_slab(s, c);
++
++ unfreeze_partials(s, c);
++ }
++}
++
++static void flush_cpu_slab(void *d)
++{
++ struct kmem_cache *s = d;
++
++ __flush_cpu_slab(s, smp_processor_id());
++}
++
++static bool has_cpu_slab(int cpu, void *info)
++{
++ struct kmem_cache *s = info;
++ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
++
++ return c->page || c->partial;
++}
++
++static void flush_all(struct kmem_cache *s)
++{
++ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
++}
++
++/*
++ * Check if the objects in a per cpu structure fit numa
++ * locality expectations.
++ */
++static inline int node_match(struct page *page, int node)
++{
++#ifdef CONFIG_NUMA
++ if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
++ return 0;
++#endif
++ return 1;
++}
++
++#ifdef CONFIG_SLUB_DEBUG
++static int count_free(struct page *page)
++{
++ return page->objects - page->inuse;
++}
++
++static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
++{
++ return atomic_long_read(&n->total_objects);
++}
++#endif /* CONFIG_SLUB_DEBUG */
++
++#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
++static unsigned long count_partial(struct kmem_cache_node *n,
++ int (*get_count)(struct page *))
++{
++ unsigned long flags;
++ unsigned long x = 0;
++ struct page *page;
++
++ spin_lock_irqsave(&n->list_lock, flags);
++ list_for_each_entry(page, &n->partial, lru)
++ x += get_count(page);
++ spin_unlock_irqrestore(&n->list_lock, flags);
++ return x;
++}
++#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
++
++static noinline void
++slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
++{
++#ifdef CONFIG_SLUB_DEBUG
++ static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
++ DEFAULT_RATELIMIT_BURST);
++ int node;
++ struct kmem_cache_node *n;
++
++ if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
++ return;
++
++ pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
++ nid, gfpflags);
++ pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
++ s->name, s->object_size, s->size, oo_order(s->oo),
++ oo_order(s->min));
++
++ if (oo_order(s->min) > get_order(s->object_size))
++ pr_warn(" %s debugging increased min order, use slub_debug=O to disable.\n",
++ s->name);
++
++ for_each_kmem_cache_node(s, node, n) {
++ unsigned long nr_slabs;
++ unsigned long nr_objs;
++ unsigned long nr_free;
++
++ nr_free = count_partial(n, count_free);
++ nr_slabs = node_nr_slabs(n);
++ nr_objs = node_nr_objs(n);
++
++ pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
++ node, nr_slabs, nr_objs, nr_free);
++ }
++#endif
++}
++
++static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
++ int node, struct kmem_cache_cpu **pc)
++{
++ void *freelist;
++ struct kmem_cache_cpu *c = *pc;
++ struct page *page;
++
++ freelist = get_partial(s, flags, node, c);
++
++ if (freelist)
++ return freelist;
++
++ page = new_slab(s, flags, node);
++ if (page) {
++ c = raw_cpu_ptr(s->cpu_slab);
++ if (c->page)
++ flush_slab(s, c);
++
++ /*
++ * No other reference to the page yet so we can
++ * muck around with it freely without cmpxchg
++ */
++ freelist = page->freelist;
++ page->freelist = NULL;
++
++ stat(s, ALLOC_SLAB);
++ c->page = page;
++ *pc = c;
++ } else
++ freelist = NULL;
++
++ return freelist;
++}
++
++static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
++{
++ if (unlikely(PageSlabPfmemalloc(page)))
++ return gfp_pfmemalloc_allowed(gfpflags);
++
++ return true;
++}
++
++/*
++ * Check the page->freelist of a page and either transfer the freelist to the
++ * per cpu freelist or deactivate the page.
++ *
++ * The page is still frozen if the return value is not NULL.
++ *
++ * If this function returns NULL then the page has been unfrozen.
++ *
++ * This function must be called with interrupts disabled.
++ */
++static inline void *get_freelist(struct kmem_cache *s, struct page *page)
++{
++ struct page new;
++ unsigned long counters;
++ void *freelist;
++
++ do {
++ freelist = page->freelist;
++ counters = page->counters;
++
++ new.counters = counters;
++ VM_BUG_ON(!new.frozen);
++
++ new.inuse = page->objects;
++ new.frozen = freelist != NULL;
++
++ } while (!__cmpxchg_double_slab(s, page,
++ freelist, counters,
++ NULL, new.counters,
++ "get_freelist"));
++
++ return freelist;
++}
++
++/*
++ * Slow path. The lockless freelist is empty or we need to perform
++ * debugging duties.
++ *
++ * Processing is still very fast if new objects have been freed to the
++ * regular freelist. In that case we simply take over the regular freelist
++ * as the lockless freelist and zap the regular freelist.
++ *
++ * If that is not working then we fall back to the partial lists. We take the
++ * first element of the freelist as the object to allocate now and move the
++ * rest of the freelist to the lockless freelist.
++ *
++ * And if we were unable to get a new slab from the partial slab lists then
++ * we need to allocate a new slab. This is the slowest path since it involves
++ * a call to the page allocator and the setup of a new slab.
++ */
++static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
++ unsigned long addr, struct kmem_cache_cpu *c)
++{
++ void *freelist;
++ struct page *page;
++ unsigned long flags;
++
++ local_irq_save(flags);
++#ifdef CONFIG_PREEMPT
++ /*
++ * We may have been preempted and rescheduled on a different
++ * cpu before disabling interrupts. Need to reload cpu area
++ * pointer.
++ */
++ c = this_cpu_ptr(s->cpu_slab);
++#endif
++
++ page = c->page;
++ if (!page)
++ goto new_slab;
++redo:
++
++ if (unlikely(!node_match(page, node))) {
++ int searchnode = node;
++
++ if (node != NUMA_NO_NODE && !node_present_pages(node))
++ searchnode = node_to_mem_node(node);
++
++ if (unlikely(!node_match(page, searchnode))) {
++ stat(s, ALLOC_NODE_MISMATCH);
++ deactivate_slab(s, page, c->freelist);
++ c->page = NULL;
++ c->freelist = NULL;
++ goto new_slab;
++ }
++ }
++
++ /*
++ * By rights, we should be searching for a slab page that was
++ * PFMEMALLOC but right now, we are losing the pfmemalloc
++ * information when the page leaves the per-cpu allocator
++ */
++ if (unlikely(!pfmemalloc_match(page, gfpflags))) {
++ deactivate_slab(s, page, c->freelist);
++ c->page = NULL;
++ c->freelist = NULL;
++ goto new_slab;
++ }
++
++ /* must check again c->freelist in case of cpu migration or IRQ */
++ freelist = c->freelist;
++ if (freelist)
++ goto load_freelist;
++
++ freelist = get_freelist(s, page);
++
++ if (!freelist) {
++ c->page = NULL;
++ stat(s, DEACTIVATE_BYPASS);
++ goto new_slab;
++ }
++
++ stat(s, ALLOC_REFILL);
++
++load_freelist:
++ /*
++ * freelist is pointing to the list of objects to be used.
++ * page is pointing to the page from which the objects are obtained.
++ * That page must be frozen for per cpu allocations to work.
++ */
++ VM_BUG_ON(!c->page->frozen);
++ c->freelist = get_freepointer(s, freelist);
++ c->tid = next_tid(c->tid);
++ local_irq_restore(flags);
++ return freelist;
++
++new_slab:
++
++ if (c->partial) {
++ page = c->page = c->partial;
++ c->partial = page->next;
++ stat(s, CPU_PARTIAL_ALLOC);
++ c->freelist = NULL;
++ goto redo;
++ }
++
++ freelist = new_slab_objects(s, gfpflags, node, &c);
++
++ if (unlikely(!freelist)) {
++ slab_out_of_memory(s, gfpflags, node);
++ local_irq_restore(flags);
++ return NULL;
++ }
++
++ page = c->page;
++ if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
++ goto load_freelist;
++
++ /* Only entered in the debug case */
++ if (kmem_cache_debug(s) &&
++ !alloc_debug_processing(s, page, freelist, addr))
++ goto new_slab; /* Slab failed checks. Next slab needed */
++
++ deactivate_slab(s, page, get_freepointer(s, freelist));
++ c->page = NULL;
++ c->freelist = NULL;
++ local_irq_restore(flags);
++ return freelist;
++}
++
++/*
++ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
++ * have the fastpath folded into their functions. So no function call
++ * overhead for requests that can be satisfied on the fastpath.
++ *
++ * The fastpath works by first checking if the lockless freelist can be used.
++ * If not then __slab_alloc is called for slow processing.
++ *
++ * Otherwise we can simply pick the next object from the lockless free list.
++ */
++static __always_inline void *slab_alloc_node(struct kmem_cache *s,
++ gfp_t gfpflags, int node, unsigned long addr)
++{
++ void **object;
++ struct kmem_cache_cpu *c;
++ struct page *page;
++ unsigned long tid;
++
++ s = slab_pre_alloc_hook(s, gfpflags);
++ if (!s)
++ return NULL;
++redo:
++ /*
++ * Must read kmem_cache cpu data via this cpu ptr. Preemption is
++ * enabled. We may switch back and forth between cpus while
++ * reading from one cpu area. That does not matter as long
++ * as we end up on the original cpu again when doing the cmpxchg.
++ *
++ * We should guarantee that tid and kmem_cache are retrieved on
++ * the same cpu. It could be different if CONFIG_PREEMPT so we need
++ * to check if it is matched or not.
++ */
++ do {
++ tid = this_cpu_read(s->cpu_slab->tid);
++ c = raw_cpu_ptr(s->cpu_slab);
++ } while (IS_ENABLED(CONFIG_PREEMPT) &&
++ unlikely(tid != READ_ONCE(c->tid)));
++
++ /*
++ * Irqless object alloc/free algorithm used here depends on sequence
++ * of fetching cpu_slab's data. tid should be fetched before anything
++ * on c to guarantee that object and page associated with previous tid
++ * won't be used with current tid. If we fetch tid first, object and
++ * page could be one associated with next tid and our alloc/free
++	 * request will fail. In this case, we will retry. So, no problem.
++ */
++ barrier();
++
++ /*
++ * The transaction ids are globally unique per cpu and per operation on
++	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
++ * occurs on the right processor and that there was no operation on the
++ * linked list in between.
++ */
++
++ object = c->freelist;
++ page = c->page;
++ if (unlikely(!object || !node_match(page, node))) {
++ object = __slab_alloc(s, gfpflags, node, addr, c);
++ stat(s, ALLOC_SLOWPATH);
++ } else {
++ void *next_object = get_freepointer_safe(s, object);
++
++ /*
++ * The cmpxchg will only match if there was no additional
++ * operation and if we are on the right processor.
++ *
++ * The cmpxchg does the following atomically (without lock
++ * semantics!)
++ * 1. Relocate first pointer to the current per cpu area.
++ * 2. Verify that tid and freelist have not been changed
++ * 3. If they were not changed replace tid and freelist
++ *
++ * Since this is without lock semantics the protection is only
++ * against code executing on this cpu *not* from access by
++ * other cpus.
++ */
++ if (unlikely(!this_cpu_cmpxchg_double(
++ s->cpu_slab->freelist, s->cpu_slab->tid,
++ object, tid,
++ next_object, next_tid(tid)))) {
++
++ note_cmpxchg_failure("slab_alloc", s, tid);
++ goto redo;
++ }
++ prefetch_freepointer(s, next_object);
++ stat(s, ALLOC_FASTPATH);
++ }
++
++ if (unlikely(gfpflags & __GFP_ZERO) && object)
++ memset(object, 0, s->object_size);
++
++ slab_post_alloc_hook(s, gfpflags, object);
++
++ return object;
++}
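[Illustrative sketch, not part of the patch: the fastpath above pairs the freelist head with a per-cpu transaction id so that a failed cmpxchg_double simply retries. The stand-alone user-space C program below shows the same retry pattern in miniature, simplified to a single-word compare-and-swap by packing a 32-bit slot index and a 32-bit generation counter into one 64-bit word; all names (free_head, pop_object, next_free) are invented for this example.]

    /*
     * Minimal analogue of the tid-protected lockless freelist pop in
     * slab_alloc_node(). The generation counter plays the role of tid:
     * even if the same index reappears at the head, the counter differs,
     * the compare-and-swap fails and we retry instead of corrupting the list.
     */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NOBJ 4
    static int32_t next_free[NOBJ] = { 1, 2, 3, -1 }; /* per-object "freepointer" */
    static _Atomic uint64_t free_head;                /* low 32 bits: index, high 32 bits: generation */

    static uint64_t pack(int32_t idx, uint32_t gen)
    {
            return ((uint64_t)gen << 32) | (uint32_t)idx;
    }

    static int32_t pop_object(void)
    {
            uint64_t old = atomic_load(&free_head);

            for (;;) {
                    int32_t idx = (int32_t)(uint32_t)old;
                    uint32_t gen = (uint32_t)(old >> 32);

                    if (idx < 0)
                            return -1; /* freelist empty: would fall back to the slow path */

                    /* only succeeds if neither the head nor the generation changed */
                    uint64_t new_head = pack(next_free[idx], gen + 1);
                    if (atomic_compare_exchange_weak(&free_head, &old, new_head))
                            return idx; /* fastpath hit */
                    /* CAS failed: another operation ran in between; retry, like goto redo */
            }
    }

    int main(void)
    {
            atomic_store(&free_head, pack(0, 0));
            for (int i = 0; i < 5; i++)
                    printf("popped object %d\n", pop_object());
            return 0;
    }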
++
++static __always_inline void *slab_alloc(struct kmem_cache *s,
++ gfp_t gfpflags, unsigned long addr)
++{
++ return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
++}
++
++void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
++{
++ void *ret = slab_alloc(s, gfpflags, _RET_IP_);
++
++ trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
++ s->size, gfpflags);
++
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_alloc);
++
++#ifdef CONFIG_TRACING
++void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
++{
++ void *ret = slab_alloc(s, gfpflags, _RET_IP_);
++ trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
++ kasan_kmalloc(s, ret, size);
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_alloc_trace);
++#endif
++
++#ifdef CONFIG_NUMA
++void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
++{
++ void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
++
++ trace_kmem_cache_alloc_node(_RET_IP_, ret,
++ s->object_size, s->size, gfpflags, node);
++
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_alloc_node);
++
++#ifdef CONFIG_TRACING
++void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
++ gfp_t gfpflags,
++ int node, size_t size)
++{
++ void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
++
++ trace_kmalloc_node(_RET_IP_, ret,
++ size, s->size, gfpflags, node);
++
++ kasan_kmalloc(s, ret, size);
++ return ret;
++}
++EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
++#endif
++#endif
++
++/*
++ * Slow path handling. This may still be called frequently since objects
++ * have a longer lifetime than the cpu slabs in most processing loads.
++ *
++ * So we still attempt to reduce cache line usage. Just take the slab
++ * lock and free the item. If there is no additional partial page
++ * handling required then we can return immediately.
++ */
++static void __slab_free(struct kmem_cache *s, struct page *page,
++ void *x, unsigned long addr)
++{
++ void *prior;
++ void **object = (void *)x;
++ int was_frozen;
++ struct page new;
++ unsigned long counters;
++ struct kmem_cache_node *n = NULL;
++ unsigned long uninitialized_var(flags);
++
++ stat(s, FREE_SLOWPATH);
++
++ if (kmem_cache_debug(s) &&
++ !(n = free_debug_processing(s, page, x, addr, &flags)))
++ return;
++
++ do {
++ if (unlikely(n)) {
++ spin_unlock_irqrestore(&n->list_lock, flags);
++ n = NULL;
++ }
++ prior = page->freelist;
++ counters = page->counters;
++ set_freepointer(s, object, prior);
++ new.counters = counters;
++ was_frozen = new.frozen;
++ new.inuse--;
++ if ((!new.inuse || !prior) && !was_frozen) {
++
++ if (kmem_cache_has_cpu_partial(s) && !prior) {
++
++ /*
++ * Slab was on no list before and will be
++ * partially empty.
++ * We can defer the list move and instead
++ * freeze it.
++ */
++ new.frozen = 1;
++
++ } else { /* Needs to be taken off a list */
++
++ n = get_node(s, page_to_nid(page));
++ /*
++ * Speculatively acquire the list_lock.
++ * If the cmpxchg does not succeed then we may
++ * drop the list_lock without any processing.
++ *
++ * Otherwise the list_lock will synchronize with
++ * other processors updating the list of slabs.
++ */
++ spin_lock_irqsave(&n->list_lock, flags);
++
++ }
++ }
++
++ } while (!cmpxchg_double_slab(s, page,
++ prior, counters,
++ object, new.counters,
++ "__slab_free"));
++
++ if (likely(!n)) {
++
++ /*
++ * If we just froze the page then put it onto the
++ * per cpu partial list.
++ */
++ if (new.frozen && !was_frozen) {
++ put_cpu_partial(s, page, 1);
++ stat(s, CPU_PARTIAL_FREE);
++ }
++ /*
++ * The list lock was not taken, therefore no list
++ * activity is necessary.
++ */
++ if (was_frozen)
++ stat(s, FREE_FROZEN);
++ return;
++ }
++
++ if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
++ goto slab_empty;
++
++ /*
++ * Objects left in the slab. If it was not on the partial list before
++ * then add it.
++ */
++ if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
++ if (kmem_cache_debug(s))
++ remove_full(s, n, page);
++ add_partial(n, page, DEACTIVATE_TO_TAIL);
++ stat(s, FREE_ADD_PARTIAL);
++ }
++ spin_unlock_irqrestore(&n->list_lock, flags);
++ return;
++
++slab_empty:
++ if (prior) {
++ /*
++ * Slab on the partial list.
++ */
++ remove_partial(n, page);
++ stat(s, FREE_REMOVE_PARTIAL);
++ } else {
++ /* Slab must be on the full list */
++ remove_full(s, n, page);
++ }
++
++ spin_unlock_irqrestore(&n->list_lock, flags);
++ stat(s, FREE_SLAB);
++ discard_slab(s, page);
++}
++
++/*
++ * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
++ * can perform fastpath freeing without additional function calls.
++ *
++ * The fastpath is only possible if we are freeing to the current cpu slab
++ * of this processor. This is typically the case if we have just allocated
++ * the item before.
++ *
++ * If fastpath is not possible then fall back to __slab_free where we deal
++ * with all sorts of special processing.
++ */
++static __always_inline void slab_free(struct kmem_cache *s,
++ struct page *page, void *x, unsigned long addr)
++{
++ void **object = (void *)x;
++ struct kmem_cache_cpu *c;
++ unsigned long tid;
++
++ slab_free_hook(s, x);
++
++redo:
++ /*
++ * Determine the current cpu's per cpu slab.
++ * The cpu may change afterward. However that does not matter since
++ * data is retrieved via this pointer. If we are on the same cpu
++ * during the cmpxchg then the free will succeed.
++ */
++ do {
++ tid = this_cpu_read(s->cpu_slab->tid);
++ c = raw_cpu_ptr(s->cpu_slab);
++ } while (IS_ENABLED(CONFIG_PREEMPT) &&
++ unlikely(tid != READ_ONCE(c->tid)));
++
++ /* Same with comment on barrier() in slab_alloc_node() */
++ barrier();
++
++ if (likely(page == c->page)) {
++ set_freepointer(s, object, c->freelist);
++
++ if (unlikely(!this_cpu_cmpxchg_double(
++ s->cpu_slab->freelist, s->cpu_slab->tid,
++ c->freelist, tid,
++ object, next_tid(tid)))) {
++
++ note_cmpxchg_failure("slab_free", s, tid);
++ goto redo;
++ }
++ stat(s, FREE_FASTPATH);
++ } else
++ __slab_free(s, page, x, addr);
++
++}
++
++void kmem_cache_free(struct kmem_cache *s, void *x)
++{
++ s = cache_from_obj(s, x);
++ if (!s)
++ return;
++ slab_free(s, virt_to_head_page(x), x, _RET_IP_);
++ trace_kmem_cache_free(_RET_IP_, x);
++}
++EXPORT_SYMBOL(kmem_cache_free);
++
++/*
++ * Object placement in a slab is made very easy because we always start at
++ * offset 0. If we tune the size of the object to the alignment then we can
++ * get the required alignment by putting one properly sized object after
++ * another.
++ *
++ * Notice that the allocation order determines the sizes of the per cpu
++ * caches. Each processor always has one slab available for allocations.
++ * Increasing the allocation order reduces the number of times that slabs
++ * must be moved on and off the partial lists and is therefore a factor in
++ * locking overhead.
++ */
++
++/*
++ * Minimum / Maximum order of slab pages. This influences locking overhead
++ * and slab fragmentation. A higher order reduces the number of partial slabs
++ * and increases the number of allocations possible without having to
++ * take the list_lock.
++ */
++static int slub_min_order;
++static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
++static int slub_min_objects;
++
++/*
++ * Calculate the order of allocation given a slab object size.
++ *
++ * The order of allocation has significant impact on performance and other
++ * system components. Generally order 0 allocations should be preferred since
++ * order 0 does not cause fragmentation in the page allocator. Larger objects
++ * can be problematic to put into order 0 slabs because there may be too much
++ * unused space left. We go to a higher order if more than 1/16th of the slab
++ * would be wasted.
++ *
++ * In order to reach satisfactory performance we must ensure that a minimum
++ * number of objects is in one slab. Otherwise we may generate too much
++ * activity on the partial lists which requires taking the list_lock. This is
++ * less a concern for large slabs though which are rarely used.
++ *
++ * slub_max_order specifies the order where we begin to stop considering the
++ * number of objects in a slab as critical. If we reach slub_max_order then
++ * we try to keep the page order as low as possible. So we accept more waste
++ * of space in favor of a small page order.
++ *
++ * Higher order allocations also allow the placement of more objects in a
++ * slab and thereby reduce object handling overhead. If the user has
++ * requested a higher minimum order then we start with that one instead of
++ * the smallest order which will fit the object.
++ */
++static inline int slab_order(int size, int min_objects,
++ int max_order, int fract_leftover, int reserved)
++{
++ int order;
++ int rem;
++ int min_order = slub_min_order;
++
++ if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
++ return get_order(size * MAX_OBJS_PER_PAGE) - 1;
++
++ for (order = max(min_order,
++ fls(min_objects * size - 1) - PAGE_SHIFT);
++ order <= max_order; order++) {
++
++ unsigned long slab_size = PAGE_SIZE << order;
++
++ if (slab_size < min_objects * size + reserved)
++ continue;
++
++ rem = (slab_size - reserved) % size;
++
++ if (rem <= slab_size / fract_leftover)
++ break;
++
++ }
++
++ return order;
++}
++
++static inline int calculate_order(int size, int reserved)
++{
++ int order;
++ int min_objects;
++ int fraction;
++ int max_objects;
++
++ /*
++ * Attempt to find best configuration for a slab. This
++ * works by first attempting to generate a layout with
++ * the best configuration and backing off gradually.
++ *
++ * First we reduce the acceptable waste in a slab. Then
++ * we reduce the minimum objects required in a slab.
++ */
++ min_objects = slub_min_objects;
++ if (!min_objects)
++ min_objects = 4 * (fls(nr_cpu_ids) + 1);
++ max_objects = order_objects(slub_max_order, size, reserved);
++ min_objects = min(min_objects, max_objects);
++
++ while (min_objects > 1) {
++ fraction = 16;
++ while (fraction >= 4) {
++ order = slab_order(size, min_objects,
++ slub_max_order, fraction, reserved);
++ if (order <= slub_max_order)
++ return order;
++ fraction /= 2;
++ }
++ min_objects--;
++ }
++
++ /*
++ * We were unable to place multiple objects in a slab. Now
++ * let's see if we can place a single object there.
++ */
++ order = slab_order(size, 1, slub_max_order, 1, reserved);
++ if (order <= slub_max_order)
++ return order;
++
++ /*
++ * Doh this slab cannot be placed using slub_max_order.
++ */
++ order = slab_order(size, 1, MAX_ORDER, 1, reserved);
++ if (order < MAX_ORDER)
++ return order;
++ return -ENOSYS;
++}
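[Illustrative sketch, not part of the patch: to make the waste rule in slab_order() concrete, the stand-alone C program below prints, for a hypothetical 700-byte object on 4 KiB pages, how many objects fit per order and how much space is left over; it ignores the reserved bytes and the min_objects constraint for brevity. With the initial fraction of 16, order 0 wastes 596 of 4096 bytes (more than 1/16) and is rejected, while order 1 wastes 492 of 8192 bytes (less than 1/16) and is accepted.]

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long size = 700; /* hypothetical object size */

            for (int order = 0; order <= 3; order++) {
                    unsigned long slab_size = PAGE_SIZE << order;
                    unsigned long objects = slab_size / size;
                    unsigned long rem = slab_size % size;  /* leftover space in this slab */

                    printf("order %d: %2lu objects, %4lu bytes wasted (limit %lu)\n",
                           order, objects, rem, slab_size / 16);
            }
            return 0;
    }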
++
++static void
++init_kmem_cache_node(struct kmem_cache_node *n)
++{
++ n->nr_partial = 0;
++ spin_lock_init(&n->list_lock);
++ INIT_LIST_HEAD(&n->partial);
++#ifdef CONFIG_SLUB_DEBUG
++ atomic_long_set(&n->nr_slabs, 0);
++ atomic_long_set(&n->total_objects, 0);
++ INIT_LIST_HEAD(&n->full);
++#endif
++}
++
++static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
++{
++ BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
++ KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
++
++ /*
++ * Must align to double word boundary for the double cmpxchg
++ * instructions to work; see __pcpu_double_call_return_bool().
++ */
++ s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
++ 2 * sizeof(void *));
++
++ if (!s->cpu_slab)
++ return 0;
++
++ init_kmem_cache_cpus(s);
++
++ return 1;
++}
++
++static struct kmem_cache *kmem_cache_node;
++
++/*
++ * No kmalloc_node yet so do it by hand. We know that this is the first
++ * slab on the node for this slabcache. There are no concurrent accesses
++ * possible.
++ *
++ * Note that this function only works on the kmem_cache_node
++ * when allocating for the kmem_cache_node. This is used for bootstrapping
++ * memory on a fresh node that has no slab structures yet.
++ */
++static void early_kmem_cache_node_alloc(int node)
++{
++ struct page *page;
++ struct kmem_cache_node *n;
++
++ BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
++
++ page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
++
++ BUG_ON(!page);
++ if (page_to_nid(page) != node) {
++ pr_err("SLUB: Unable to allocate memory from node %d\n", node);
++ pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
++ }
++
++ n = page->freelist;
++ BUG_ON(!n);
++ page->freelist = get_freepointer(kmem_cache_node, n);
++ page->inuse = 1;
++ page->frozen = 0;
++ kmem_cache_node->node[node] = n;
++#ifdef CONFIG_SLUB_DEBUG
++ init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
++ init_tracking(kmem_cache_node, n);
++#endif
++ kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
++ init_kmem_cache_node(n);
++ inc_slabs_node(kmem_cache_node, node, page->objects);
++
++ /*
++ * No locks need to be taken here as it has just been
++ * initialized and there is no concurrent access.
++ */
++ __add_partial(n, page, DEACTIVATE_TO_HEAD);
++}
++
++static void free_kmem_cache_nodes(struct kmem_cache *s)
++{
++ int node;
++ struct kmem_cache_node *n;
++
++ for_each_kmem_cache_node(s, node, n) {
++ kmem_cache_free(kmem_cache_node, n);
++ s->node[node] = NULL;
++ }
++}
++
++static int init_kmem_cache_nodes(struct kmem_cache *s)
++{
++ int node;
++
++ for_each_node_state(node, N_NORMAL_MEMORY) {
++ struct kmem_cache_node *n;
++
++ if (slab_state == DOWN) {
++ early_kmem_cache_node_alloc(node);
++ continue;
++ }
++ n = kmem_cache_alloc_node(kmem_cache_node,
++ GFP_KERNEL, node);
++
++ if (!n) {
++ free_kmem_cache_nodes(s);
++ return 0;
++ }
++
++ s->node[node] = n;
++ init_kmem_cache_node(n);
++ }
++ return 1;
++}
++
++static void set_min_partial(struct kmem_cache *s, unsigned long min)
++{
++ if (min < MIN_PARTIAL)
++ min = MIN_PARTIAL;
++ else if (min > MAX_PARTIAL)
++ min = MAX_PARTIAL;
++ s->min_partial = min;
++}
++
++/*
++ * calculate_sizes() determines the order and the distribution of data within
++ * a slab object.
++ */
++static int calculate_sizes(struct kmem_cache *s, int forced_order)
++{
++ unsigned long flags = s->flags;
++ unsigned long size = s->object_size;
++ int order;
++
++ /*
++ * Round up object size to the next word boundary. We can only
++ * place the free pointer at word boundaries and this determines
++ * the possible location of the free pointer.
++ */
++ size = ALIGN(size, sizeof(void *));
++
++#ifdef CONFIG_SLUB_DEBUG
++ /*
++ * Determine if we can poison the object itself. If the user of
++ * the slab may touch the object after free or before allocation
++ * then we should never poison the object itself.
++ */
++ if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
++ !s->ctor)
++ s->flags |= __OBJECT_POISON;
++ else
++ s->flags &= ~__OBJECT_POISON;
++
++
++ /*
++ * If we are Redzoning then check if there is some space between the
++ * end of the object and the free pointer. If not then add an
++ * additional word to have some bytes to store Redzone information.
++ */
++ if ((flags & SLAB_RED_ZONE) && size == s->object_size)
++ size += sizeof(void *);
++#endif
++
++ /*
++ * With that we have determined the number of bytes in actual use
++ * by the object. This is the potential offset to the free pointer.
++ */
++ s->inuse = size;
++
++ if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
++ s->ctor)) {
++ /*
++ * Relocate free pointer after the object if it is not
++ * permitted to overwrite the first word of the object on
++ * kmem_cache_free.
++ *
++ * This is the case if we do RCU, have a constructor or
++ * destructor or are poisoning the objects.
++ */
++ s->offset = size;
++ size += sizeof(void *);
++ }
++
++#ifdef CONFIG_SLUB_DEBUG
++ if (flags & SLAB_STORE_USER)
++ /*
++ * Need to store information about allocs and frees after
++ * the object.
++ */
++ size += 2 * sizeof(struct track);
++
++ if (flags & SLAB_RED_ZONE)
++ /*
++ * Add some empty padding so that we can catch
++ * overwrites from earlier objects rather than let
++ * tracking information or the free pointer be
++ * corrupted if a user writes before the start
++ * of the object.
++ */
++ size += sizeof(void *);
++#endif
++
++ /*
++ * SLUB stores one object immediately after another beginning from
++ * offset 0. In order to align the objects we have to simply size
++ * each object to conform to the alignment.
++ */
++ size = ALIGN(size, s->align);
++ s->size = size;
++ if (forced_order >= 0)
++ order = forced_order;
++ else
++ order = calculate_order(size, s->reserved);
++
++ if (order < 0)
++ return 0;
++
++ s->allocflags = 0;
++ if (order)
++ s->allocflags |= __GFP_COMP;
++
++ if (s->flags & SLAB_CACHE_DMA)
++ s->allocflags |= GFP_DMA;
++
++ if (s->flags & SLAB_RECLAIM_ACCOUNT)
++ s->allocflags |= __GFP_RECLAIMABLE;
++
++ /*
++ * Determine the number of objects per slab
++ */
++ s->oo = oo_make(order, size, s->reserved);
++ s->min = oo_make(get_order(size), size, s->reserved);
++ if (oo_objects(s->oo) > oo_objects(s->max))
++ s->max = s->oo;
++
++ return !!oo_objects(s->oo);
++}
++
++static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
++{
++ s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
++ s->reserved = 0;
++
++ if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
++ s->reserved = sizeof(struct rcu_head);
++
++ if (!calculate_sizes(s, -1))
++ goto error;
++ if (disable_higher_order_debug) {
++ /*
++ * Disable debugging flags that store metadata if the min slab
++ * order increased.
++ */
++ if (get_order(s->size) > get_order(s->object_size)) {
++ s->flags &= ~DEBUG_METADATA_FLAGS;
++ s->offset = 0;
++ if (!calculate_sizes(s, -1))
++ goto error;
++ }
++ }
++
++#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
++ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
++ if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
++ /* Enable fast mode */
++ s->flags |= __CMPXCHG_DOUBLE;
++#endif
++
++ /*
++ * The larger the object size is, the more pages we want on the partial
++ * list to avoid pounding the page allocator excessively.
++ */
++ set_min_partial(s, ilog2(s->size) / 2);
++
++ /*
++ * cpu_partial determines the maximum number of objects kept in the
++ * per cpu partial lists of a processor.
++ *
++ * Per cpu partial lists mainly contain slabs that just have one
++ * object freed. If they are used for allocation then they can be
++ * filled up again with minimal effort. The slab will never hit the
++ * per node partial lists and therefore no locking will be required.
++ *
++ * This setting also determines
++ *
++ * A) The number of objects from per cpu partial slabs dumped to the
++ * per node list when we reach the limit.
++ * B) The number of objects in cpu partial slabs to extract from the
++ * per node list when we run out of per cpu objects. We only fetch
++ * 50% to keep some capacity around for frees.
++ */
++ if (!kmem_cache_has_cpu_partial(s))
++ s->cpu_partial = 0;
++ else if (s->size >= PAGE_SIZE)
++ s->cpu_partial = 2;
++ else if (s->size >= 1024)
++ s->cpu_partial = 6;
++ else if (s->size >= 256)
++ s->cpu_partial = 13;
++ else
++ s->cpu_partial = 30;
++
++#ifdef CONFIG_NUMA
++ s->remote_node_defrag_ratio = 1000;
++#endif
++ if (!init_kmem_cache_nodes(s))
++ goto error;
++
++ if (alloc_kmem_cache_cpus(s))
++ return 0;
++
++ free_kmem_cache_nodes(s);
++error:
++ if (flags & SLAB_PANIC)
++ panic("Cannot create slab %s size=%lu realsize=%u "
++ "order=%u offset=%u flags=%lx\n",
++ s->name, (unsigned long)s->size, s->size,
++ oo_order(s->oo), s->offset, flags);
++ return -EINVAL;
++}
++
++static void list_slab_objects(struct kmem_cache *s, struct page *page,
++ const char *text)
++{
++#ifdef CONFIG_SLUB_DEBUG
++ void *addr = page_address(page);
++ void *p;
++ unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
++ sizeof(long), GFP_ATOMIC);
++ if (!map)
++ return;
++ slab_err(s, page, text, s->name);
++ slab_lock(page);
++
++ get_map(s, page, map);
++ for_each_object(p, s, addr, page->objects) {
++
++ if (!test_bit(slab_index(p, s, addr), map)) {
++ pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
++ print_tracking(s, p);
++ }
++ }
++ slab_unlock(page);
++ kfree(map);
++#endif
++}
++
++/*
++ * Attempt to free all partial slabs on a node.
++ * This is called from kmem_cache_close(). We must be the last thread
++ * using the cache and therefore we do not need to lock anymore.
++ */
++static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
++{
++ struct page *page, *h;
++
++ list_for_each_entry_safe(page, h, &n->partial, lru) {
++ if (!page->inuse) {
++ __remove_partial(n, page);
++ discard_slab(s, page);
++ } else {
++ list_slab_objects(s, page,
++ "Objects remaining in %s on kmem_cache_close()");
++ }
++ }
++}
++
++/*
++ * Release all resources used by a slab cache.
++ */
++static inline int kmem_cache_close(struct kmem_cache *s)
++{
++ int node;
++ struct kmem_cache_node *n;
++
++ flush_all(s);
++ /* Attempt to free all objects */
++ for_each_kmem_cache_node(s, node, n) {
++ free_partial(s, n);
++ if (n->nr_partial || slabs_node(s, node))
++ return 1;
++ }
++ free_percpu(s->cpu_slab);
++ free_kmem_cache_nodes(s);
++ return 0;
++}
++
++int __kmem_cache_shutdown(struct kmem_cache *s)
++{
++ return kmem_cache_close(s);
++}
++
++/********************************************************************
++ * Kmalloc subsystem
++ *******************************************************************/
++
++static int __init setup_slub_min_order(char *str)
++{
++ get_option(&str, &slub_min_order);
++
++ return 1;
++}
++
++__setup("slub_min_order=", setup_slub_min_order);
++
++static int __init setup_slub_max_order(char *str)
++{
++ get_option(&str, &slub_max_order);
++ slub_max_order = min(slub_max_order, MAX_ORDER - 1);
++
++ return 1;
++}
++
++__setup("slub_max_order=", setup_slub_max_order);
++
++static int __init setup_slub_min_objects(char *str)
++{
++ get_option(&str, &slub_min_objects);
++
++ return 1;
++}
++
++__setup("slub_min_objects=", setup_slub_min_objects);
++
++void *__kmalloc(size_t size, gfp_t flags)
++{
++ struct kmem_cache *s;
++ void *ret;
++
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
++ return kmalloc_large(size, flags);
++
++ s = kmalloc_slab(size, flags);
++
++ if (unlikely(ZERO_OR_NULL_PTR(s)))
++ return s;
++
++ ret = slab_alloc(s, flags, _RET_IP_);
++
++ trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
++
++ kasan_kmalloc(s, ret, size);
++
++ return ret;
++}
++EXPORT_SYMBOL(__kmalloc);
++
++#ifdef CONFIG_NUMA
++static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
++{
++ struct page *page;
++ void *ptr = NULL;
++
++ flags |= __GFP_COMP | __GFP_NOTRACK;
++ page = alloc_kmem_pages_node(node, flags, get_order(size));
++ if (page)
++ ptr = page_address(page);
++
++ kmalloc_large_node_hook(ptr, size, flags);
++ return ptr;
++}
++
++void *__kmalloc_node(size_t size, gfp_t flags, int node)
++{
++ struct kmem_cache *s;
++ void *ret;
++
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
++ ret = kmalloc_large_node(size, flags, node);
++
++ trace_kmalloc_node(_RET_IP_, ret,
++ size, PAGE_SIZE << get_order(size),
++ flags, node);
++
++ return ret;
++ }
++
++ s = kmalloc_slab(size, flags);
++
++ if (unlikely(ZERO_OR_NULL_PTR(s)))
++ return s;
++
++ ret = slab_alloc_node(s, flags, node, _RET_IP_);
++
++ trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
++
++ kasan_kmalloc(s, ret, size);
++
++ return ret;
++}
++EXPORT_SYMBOL(__kmalloc_node);
++#endif
++
++static size_t __ksize(const void *object)
++{
++ struct page *page;
++
++ if (unlikely(object == ZERO_SIZE_PTR))
++ return 0;
++
++ page = virt_to_head_page(object);
++
++ if (unlikely(!PageSlab(page))) {
++ WARN_ON(!PageCompound(page));
++ return PAGE_SIZE << compound_order(page);
++ }
++
++ return slab_ksize(page->slab_cache);
++}
++
++size_t ksize(const void *object)
++{
++ size_t size = __ksize(object);
++ /* We assume that ksize callers could use the whole allocated area,
++ so we need to unpoison this area. */
++ kasan_krealloc(object, size);
++ return size;
++}
++EXPORT_SYMBOL(ksize);
++
++void kfree(const void *x)
++{
++ struct page *page;
++ void *object = (void *)x;
++
++ trace_kfree(_RET_IP_, x);
++
++ if (unlikely(ZERO_OR_NULL_PTR(x)))
++ return;
++
++ page = virt_to_head_page(x);
++ if (unlikely(!PageSlab(page))) {
++ BUG_ON(!PageCompound(page));
++ kfree_hook(x);
++ __free_kmem_pages(page, compound_order(page));
++ return;
++ }
++ slab_free(page->slab_cache, page, object, _RET_IP_);
++}
++EXPORT_SYMBOL(kfree);
++
++#define SHRINK_PROMOTE_MAX 32
++
++/*
++ * kmem_cache_shrink discards empty slabs and promotes the slabs filled
++ * up most to the head of the partial lists. New allocations will then
++ * fill those up and thus they can be removed from the partial lists.
++ *
++ * The slabs with the least items are placed last. This results in them
++ * being allocated from last, increasing the chance that the last objects
++ * are freed in them.
++ */
++int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
++{
++ int node;
++ int i;
++ struct kmem_cache_node *n;
++ struct page *page;
++ struct page *t;
++ struct list_head discard;
++ struct list_head promote[SHRINK_PROMOTE_MAX];
++ unsigned long flags;
++ int ret = 0;
++
++ if (deactivate) {
++ /*
++ * Disable empty slabs caching. Used to avoid pinning offline
++ * memory cgroups by kmem pages that can be freed.
++ */
++ s->cpu_partial = 0;
++ s->min_partial = 0;
++
++ /*
++ * s->cpu_partial is checked locklessly (see put_cpu_partial),
++ * so we have to make sure the change is visible.
++ */
++ kick_all_cpus_sync();
++ }
++
++ flush_all(s);
++ for_each_kmem_cache_node(s, node, n) {
++ INIT_LIST_HEAD(&discard);
++ for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
++ INIT_LIST_HEAD(promote + i);
++
++ spin_lock_irqsave(&n->list_lock, flags);
++
++ /*
++ * Build lists of slabs to discard or promote.
++ *
++ * Note that concurrent frees may occur while we hold the
++ * list_lock. page->inuse here is the upper limit.
++ */
++ list_for_each_entry_safe(page, t, &n->partial, lru) {
++ int free = page->objects - page->inuse;
++
++ /* Do not reread page->inuse */
++ barrier();
++
++ /* We do not keep full slabs on the list */
++ BUG_ON(free <= 0);
++
++ if (free == page->objects) {
++ list_move(&page->lru, &discard);
++ n->nr_partial--;
++ } else if (free <= SHRINK_PROMOTE_MAX)
++ list_move(&page->lru, promote + free - 1);
++ }
++
++ /*
++ * Promote the slabs filled up most to the head of the
++ * partial list.
++ */
++ for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
++ list_splice(promote + i, &n->partial);
++
++ spin_unlock_irqrestore(&n->list_lock, flags);
++
++ /* Release empty slabs */
++ list_for_each_entry_safe(page, t, &discard, lru)
++ discard_slab(s, page);
++
++ if (slabs_node(s, node))
++ ret = 1;
++ }
++
++ return ret;
++}
++
++static int slab_mem_going_offline_callback(void *arg)
++{
++ struct kmem_cache *s;
++
++ mutex_lock(&slab_mutex);
++ list_for_each_entry(s, &slab_caches, list)
++ __kmem_cache_shrink(s, false);
++ mutex_unlock(&slab_mutex);
++
++ return 0;
++}
++
++static void slab_mem_offline_callback(void *arg)
++{
++ struct kmem_cache_node *n;
++ struct kmem_cache *s;
++ struct memory_notify *marg = arg;
++ int offline_node;
++
++ offline_node = marg->status_change_nid_normal;
++
++ /*
++ * If the node still has available memory, we still need its
++ * kmem_cache_node, so there is nothing to free here.
++ */
++ if (offline_node < 0)
++ return;
++
++ mutex_lock(&slab_mutex);
++ list_for_each_entry(s, &slab_caches, list) {
++ n = get_node(s, offline_node);
++ if (n) {
++ /*
++ * if n->nr_slabs > 0, slabs still exist on the node
++ * that is going down. We were unable to free them,
++ * that is going down. We were unable to free them,
++ * callback. So, we must fail.
++ */
++ BUG_ON(slabs_node(s, offline_node));
++
++ s->node[offline_node] = NULL;
++ kmem_cache_free(kmem_cache_node, n);
++ }
++ }
++ mutex_unlock(&slab_mutex);
++}
++
++static int slab_mem_going_online_callback(void *arg)
++{
++ struct kmem_cache_node *n;
++ struct kmem_cache *s;
++ struct memory_notify *marg = arg;
++ int nid = marg->status_change_nid_normal;
++ int ret = 0;
++
++ /*
++ * If the node's memory is already available, then kmem_cache_node is
++ * already created. Nothing to do.
++ */
++ if (nid < 0)
++ return 0;
++
++ /*
++ * We are bringing a node online. No memory is available yet. We must
++ * allocate a kmem_cache_node structure in order to bring the node
++ * online.
++ */
++ mutex_lock(&slab_mutex);
++ list_for_each_entry(s, &slab_caches, list) {
++ /*
++ * XXX: kmem_cache_alloc_node will fallback to other nodes
++ * since memory is not yet available from the node that
++ * is brought up.
++ */
++ n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
++ if (!n) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ init_kmem_cache_node(n);
++ s->node[nid] = n;
++ }
++out:
++ mutex_unlock(&slab_mutex);
++ return ret;
++}
++
++static int slab_memory_callback(struct notifier_block *self,
++ unsigned long action, void *arg)
++{
++ int ret = 0;
++
++ switch (action) {
++ case MEM_GOING_ONLINE:
++ ret = slab_mem_going_online_callback(arg);
++ break;
++ case MEM_GOING_OFFLINE:
++ ret = slab_mem_going_offline_callback(arg);
++ break;
++ case MEM_OFFLINE:
++ case MEM_CANCEL_ONLINE:
++ slab_mem_offline_callback(arg);
++ break;
++ case MEM_ONLINE:
++ case MEM_CANCEL_OFFLINE:
++ break;
++ }
++ if (ret)
++ ret = notifier_from_errno(ret);
++ else
++ ret = NOTIFY_OK;
++ return ret;
++}
++
++static struct notifier_block slab_memory_callback_nb = {
++ .notifier_call = slab_memory_callback,
++ .priority = SLAB_CALLBACK_PRI,
++};
++
++/********************************************************************
++ * Basic setup of slabs
++ *******************************************************************/
++
++/*
++ * Used for early kmem_cache structures that were allocated using
++ * the page allocator. Allocate them properly then fix up the pointers
++ * that may be pointing to the wrong kmem_cache structure.
++ */
++
++static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
++{
++ int node;
++ struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
++ struct kmem_cache_node *n;
++
++ memcpy(s, static_cache, kmem_cache->object_size);
++
++ /*
++ * This runs very early, and only the boot processor is supposed to be
++ * up. Even if it weren't true, IRQs are not up so we couldn't fire
++ * IPIs around.
++ */
++ __flush_cpu_slab(s, smp_processor_id());
++ for_each_kmem_cache_node(s, node, n) {
++ struct page *p;
++
++ list_for_each_entry(p, &n->partial, lru)
++ p->slab_cache = s;
++
++#ifdef CONFIG_SLUB_DEBUG
++ list_for_each_entry(p, &n->full, lru)
++ p->slab_cache = s;
++#endif
++ }
++ slab_init_memcg_params(s);
++ list_add(&s->list, &slab_caches);
++ return s;
++}
++
++void __init kmem_cache_init(void)
++{
++ static __initdata struct kmem_cache boot_kmem_cache,
++ boot_kmem_cache_node;
++
++ if (debug_guardpage_minorder())
++ slub_max_order = 0;
++
++ kmem_cache_node = &boot_kmem_cache_node;
++ kmem_cache = &boot_kmem_cache;
++
++ create_boot_cache(kmem_cache_node, "kmem_cache_node",
++ sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
++
++ register_hotmemory_notifier(&slab_memory_callback_nb);
++
++ /* Able to allocate the per node structures */
++ slab_state = PARTIAL;
++
++ create_boot_cache(kmem_cache, "kmem_cache",
++ offsetof(struct kmem_cache, node) +
++ nr_node_ids * sizeof(struct kmem_cache_node *),
++ SLAB_HWCACHE_ALIGN);
++
++ kmem_cache = bootstrap(&boot_kmem_cache);
++
++ /*
++ * Allocate kmem_cache_node properly from the kmem_cache slab.
++ * kmem_cache_node is separately allocated so no need to
++ * update any list pointers.
++ */
++ kmem_cache_node = bootstrap(&boot_kmem_cache_node);
++
++ /* Now we can use the kmem_cache to allocate kmalloc slabs */
++ create_kmalloc_caches(0);
++
++#ifdef CONFIG_SMP
++ register_cpu_notifier(&slab_notifier);
++#endif
++
++ pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
++ cache_line_size(),
++ slub_min_order, slub_max_order, slub_min_objects,
++ nr_cpu_ids, nr_node_ids);
++}
++
++void __init kmem_cache_init_late(void)
++{
++}
++
++struct kmem_cache *
++__kmem_cache_alias(const char *name, size_t size, size_t align,
++ unsigned long flags, void (*ctor)(void *))
++{
++ struct kmem_cache *s, *c;
++
++ s = find_mergeable(size, align, flags, name, ctor);
++ if (s) {
++ s->refcount++;
++
++ /*
++ * Adjust the object sizes so that we clear
++ * the complete object on kzalloc.
++ */
++ s->object_size = max(s->object_size, (int)size);
++ s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
++
++ for_each_memcg_cache(c, s) {
++ c->object_size = s->object_size;
++ c->inuse = max_t(int, c->inuse,
++ ALIGN(size, sizeof(void *)));
++ }
++
++ if (sysfs_slab_alias(s, name)) {
++ s->refcount--;
++ s = NULL;
++ }
++ }
++
++ return s;
++}
++
++int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
++{
++ int err;
++
++ err = kmem_cache_open(s, flags);
++ if (err)
++ return err;
++
++ /* Mutex is not taken during early boot */
++ if (slab_state <= UP)
++ return 0;
++
++ memcg_propagate_slab_attrs(s);
++ err = sysfs_slab_add(s);
++ if (err)
++ kmem_cache_close(s);
++
++ return err;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Use the cpu notifier to ensure that the cpu slabs are flushed when
++ * necessary.
++ */
++static int slab_cpuup_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ long cpu = (long)hcpu;
++ struct kmem_cache *s;
++ unsigned long flags;
++
++ switch (action) {
++ case CPU_UP_CANCELED:
++ case CPU_UP_CANCELED_FROZEN:
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ mutex_lock(&slab_mutex);
++ list_for_each_entry(s, &slab_caches, list) {
++ local_irq_save(flags);
++ __flush_cpu_slab(s, cpu);
++ local_irq_restore(flags);
++ }
++ mutex_unlock(&slab_mutex);
++ break;
++ default:
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block slab_notifier = {
++ .notifier_call = slab_cpuup_callback
++};
++
++#endif
++
++void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
++{
++ struct kmem_cache *s;
++ void *ret;
++
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
++ return kmalloc_large(size, gfpflags);
++
++ s = kmalloc_slab(size, gfpflags);
++
++ if (unlikely(ZERO_OR_NULL_PTR(s)))
++ return s;
++
++ ret = slab_alloc(s, gfpflags, caller);
++
++ /* Honor the call site pointer we received. */
++ trace_kmalloc(caller, ret, size, s->size, gfpflags);
++
++ return ret;
++}
++
++#ifdef CONFIG_NUMA
++void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
++ int node, unsigned long caller)
++{
++ struct kmem_cache *s;
++ void *ret;
++
++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
++ ret = kmalloc_large_node(size, gfpflags, node);
++
++ trace_kmalloc_node(caller, ret,
++ size, PAGE_SIZE << get_order(size),
++ gfpflags, node);
++
++ return ret;
++ }
++
++ s = kmalloc_slab(size, gfpflags);
++
++ if (unlikely(ZERO_OR_NULL_PTR(s)))
++ return s;
++
++ ret = slab_alloc_node(s, gfpflags, node, caller);
++
++ /* Honor the call site pointer we received. */
++ trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
++
++ return ret;
++}
++#endif
++
++#ifdef CONFIG_SYSFS
++static int count_inuse(struct page *page)
++{
++ return page->inuse;
++}
++
++static int count_total(struct page *page)
++{
++ return page->objects;
++}
++#endif
++
++#ifdef CONFIG_SLUB_DEBUG
++static int validate_slab(struct kmem_cache *s, struct page *page,
++ unsigned long *map)
++{
++ void *p;
++ void *addr = page_address(page);
++
++ if (!check_slab(s, page) ||
++ !on_freelist(s, page, NULL))
++ return 0;
++
++ /* Now we know that a valid freelist exists */
++ bitmap_zero(map, page->objects);
++
++ get_map(s, page, map);
++ for_each_object(p, s, addr, page->objects) {
++ if (test_bit(slab_index(p, s, addr), map))
++ if (!check_object(s, page, p, SLUB_RED_INACTIVE))
++ return 0;
++ }
++
++ for_each_object(p, s, addr, page->objects)
++ if (!test_bit(slab_index(p, s, addr), map))
++ if (!check_object(s, page, p, SLUB_RED_ACTIVE))
++ return 0;
++ return 1;
++}
++
++static void validate_slab_slab(struct kmem_cache *s, struct page *page,
++ unsigned long *map)
++{
++ slab_lock(page);
++ validate_slab(s, page, map);
++ slab_unlock(page);
++}
++
++static int validate_slab_node(struct kmem_cache *s,
++ struct kmem_cache_node *n, unsigned long *map)
++{
++ unsigned long count = 0;
++ struct page *page;
++ unsigned long flags;
++
++ spin_lock_irqsave(&n->list_lock, flags);
++
++ list_for_each_entry(page, &n->partial, lru) {
++ validate_slab_slab(s, page, map);
++ count++;
++ }
++ if (count != n->nr_partial)
++ pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
++ s->name, count, n->nr_partial);
++
++ if (!(s->flags & SLAB_STORE_USER))
++ goto out;
++
++ list_for_each_entry(page, &n->full, lru) {
++ validate_slab_slab(s, page, map);
++ count++;
++ }
++ if (count != atomic_long_read(&n->nr_slabs))
++ pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
++ s->name, count, atomic_long_read(&n->nr_slabs));
++
++out:
++ spin_unlock_irqrestore(&n->list_lock, flags);
++ return count;
++}
++
++static long validate_slab_cache(struct kmem_cache *s)
++{
++ int node;
++ unsigned long count = 0;
++ unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
++ sizeof(unsigned long), GFP_KERNEL);
++ struct kmem_cache_node *n;
++
++ if (!map)
++ return -ENOMEM;
++
++ flush_all(s);
++ for_each_kmem_cache_node(s, node, n)
++ count += validate_slab_node(s, n, map);
++ kfree(map);
++ return count;
++}
++/*
++ * Generate lists of code addresses where slabcache objects are allocated
++ * and freed.
++ */
++
++struct location {
++ unsigned long count;
++ unsigned long addr;
++ long long sum_time;
++ long min_time;
++ long max_time;
++ long min_pid;
++ long max_pid;
++ DECLARE_BITMAP(cpus, NR_CPUS);
++ nodemask_t nodes;
++};
++
++struct loc_track {
++ unsigned long max;
++ unsigned long count;
++ struct location *loc;
++};
++
++static void free_loc_track(struct loc_track *t)
++{
++ if (t->max)
++ free_pages((unsigned long)t->loc,
++ get_order(sizeof(struct location) * t->max));
++}
++
++static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
++{
++ struct location *l;
++ int order;
++
++ order = get_order(sizeof(struct location) * max);
++
++ l = (void *)__get_free_pages(flags, order);
++ if (!l)
++ return 0;
++
++ if (t->count) {
++ memcpy(l, t->loc, sizeof(struct location) * t->count);
++ free_loc_track(t);
++ }
++ t->max = max;
++ t->loc = l;
++ return 1;
++}
++
++static int add_location(struct loc_track *t, struct kmem_cache *s,
++ const struct track *track)
++{
++ long start, end, pos;
++ struct location *l;
++ unsigned long caddr;
++ unsigned long age = jiffies - track->when;
++
++ start = -1;
++ end = t->count;
++
++ for ( ; ; ) {
++ pos = start + (end - start + 1) / 2;
++
++ /*
++ * There is nothing at "end". If we end up there
++ * we need to add something before end.
++ */
++ if (pos == end)
++ break;
++
++ caddr = t->loc[pos].addr;
++ if (track->addr == caddr) {
++
++ l = &t->loc[pos];
++ l->count++;
++ if (track->when) {
++ l->sum_time += age;
++ if (age < l->min_time)
++ l->min_time = age;
++ if (age > l->max_time)
++ l->max_time = age;
++
++ if (track->pid < l->min_pid)
++ l->min_pid = track->pid;
++ if (track->pid > l->max_pid)
++ l->max_pid = track->pid;
++
++ cpumask_set_cpu(track->cpu,
++ to_cpumask(l->cpus));
++ }
++ node_set(page_to_nid(virt_to_page(track)), l->nodes);
++ return 1;
++ }
++
++ if (track->addr < caddr)
++ end = pos;
++ else
++ start = pos;
++ }
++
++ /*
++ * Not found. Insert new tracking element.
++ */
++ if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
++ return 0;
++
++ l = t->loc + pos;
++ if (pos < t->count)
++ memmove(l + 1, l,
++ (t->count - pos) * sizeof(struct location));
++ t->count++;
++ l->count = 1;
++ l->addr = track->addr;
++ l->sum_time = age;
++ l->min_time = age;
++ l->max_time = age;
++ l->min_pid = track->pid;
++ l->max_pid = track->pid;
++ cpumask_clear(to_cpumask(l->cpus));
++ cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
++ nodes_clear(l->nodes);
++ node_set(page_to_nid(virt_to_page(track)), l->nodes);
++ return 1;
++}
++
++static void process_slab(struct loc_track *t, struct kmem_cache *s,
++ struct page *page, enum track_item alloc,
++ unsigned long *map)
++{
++ void *addr = page_address(page);
++ void *p;
++
++ bitmap_zero(map, page->objects);
++ get_map(s, page, map);
++
++ for_each_object(p, s, addr, page->objects)
++ if (!test_bit(slab_index(p, s, addr), map))
++ add_location(t, s, get_track(s, p, alloc));
++}
++
++static int list_locations(struct kmem_cache *s, char *buf,
++ enum track_item alloc)
++{
++ int len = 0;
++ unsigned long i;
++ struct loc_track t = { 0, 0, NULL };
++ int node;
++ unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
++ sizeof(unsigned long), GFP_KERNEL);
++ struct kmem_cache_node *n;
++
++ if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
++ GFP_TEMPORARY)) {
++ kfree(map);
++ return sprintf(buf, "Out of memory\n");
++ }
++ /* Push back cpu slabs */
++ flush_all(s);
++
++ for_each_kmem_cache_node(s, node, n) {
++ unsigned long flags;
++ struct page *page;
++
++ if (!atomic_long_read(&n->nr_slabs))
++ continue;
++
++ spin_lock_irqsave(&n->list_lock, flags);
++ list_for_each_entry(page, &n->partial, lru)
++ process_slab(&t, s, page, alloc, map);
++ list_for_each_entry(page, &n->full, lru)
++ process_slab(&t, s, page, alloc, map);
++ spin_unlock_irqrestore(&n->list_lock, flags);
++ }
++
++ for (i = 0; i < t.count; i++) {
++ struct location *l = &t.loc[i];
++
++ if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
++ break;
++ len += sprintf(buf + len, "%7ld ", l->count);
++
++ if (l->addr)
++ len += sprintf(buf + len, "%pS", (void *)l->addr);
++ else
++ len += sprintf(buf + len, "<not-available>");
++
++ if (l->sum_time != l->min_time) {
++ len += sprintf(buf + len, " age=%ld/%ld/%ld",
++ l->min_time,
++ (long)div_u64(l->sum_time, l->count),
++ l->max_time);
++ } else
++ len += sprintf(buf + len, " age=%ld",
++ l->min_time);
++
++ if (l->min_pid != l->max_pid)
++ len += sprintf(buf + len, " pid=%ld-%ld",
++ l->min_pid, l->max_pid);
++ else
++ len += sprintf(buf + len, " pid=%ld",
++ l->min_pid);
++
++ if (num_online_cpus() > 1 &&
++ !cpumask_empty(to_cpumask(l->cpus)) &&
++ len < PAGE_SIZE - 60)
++ len += scnprintf(buf + len, PAGE_SIZE - len - 50,
++ " cpus=%*pbl",
++ cpumask_pr_args(to_cpumask(l->cpus)));
++
++ if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
++ len < PAGE_SIZE - 60)
++ len += scnprintf(buf + len, PAGE_SIZE - len - 50,
++ " nodes=%*pbl",
++ nodemask_pr_args(&l->nodes));
++
++ len += sprintf(buf + len, "\n");
++ }
++
++ free_loc_track(&t);
++ kfree(map);
++ if (!t.count)
++ len += sprintf(buf, "No data\n");
++ return len;
++}
++#endif
++
++#ifdef SLUB_RESILIENCY_TEST
++static void __init resiliency_test(void)
++{
++ u8 *p;
++
++ BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
++
++ pr_err("SLUB resiliency testing\n");
++ pr_err("-----------------------\n");
++ pr_err("A. Corruption after allocation\n");
++
++ p = kzalloc(16, GFP_KERNEL);
++ p[16] = 0x12;
++ pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
++ p + 16);
++
++ validate_slab_cache(kmalloc_caches[4]);
++
++ /* Hmmm... The next two are dangerous */
++ p = kzalloc(32, GFP_KERNEL);
++ p[32 + sizeof(void *)] = 0x34;
++ pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> -0x%p\n",
++ p);
++ pr_err("If allocated object is overwritten then not detectable\n\n");
++
++ validate_slab_cache(kmalloc_caches[5]);
++ p = kzalloc(64, GFP_KERNEL);
++ p += 64 + (get_cycles() & 0xff) * sizeof(void *);
++ *p = 0x56;
++ pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
++ p);
++ pr_err("If allocated object is overwritten then not detectable\n\n");
++ validate_slab_cache(kmalloc_caches[6]);
++
++ pr_err("\nB. Corruption after free\n");
++ p = kzalloc(128, GFP_KERNEL);
++ kfree(p);
++ *p = 0x78;
++ pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
++ validate_slab_cache(kmalloc_caches[7]);
++
++ p = kzalloc(256, GFP_KERNEL);
++ kfree(p);
++ p[50] = 0x9a;
++ pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
++ validate_slab_cache(kmalloc_caches[8]);
++
++ p = kzalloc(512, GFP_KERNEL);
++ kfree(p);
++ p[512] = 0xab;
++ pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
++ validate_slab_cache(kmalloc_caches[9]);
++}
++#else
++#ifdef CONFIG_SYSFS
++static void resiliency_test(void) {};
++#endif
++#endif
++
++#ifdef CONFIG_SYSFS
++enum slab_stat_type {
++ SL_ALL, /* All slabs */
++ SL_PARTIAL, /* Only partially allocated slabs */
++ SL_CPU, /* Only slabs used for cpu caches */
++ SL_OBJECTS, /* Determine allocated objects not slabs */
++ SL_TOTAL /* Determine object capacity not slabs */
++};
++
++#define SO_ALL (1 << SL_ALL)
++#define SO_PARTIAL (1 << SL_PARTIAL)
++#define SO_CPU (1 << SL_CPU)
++#define SO_OBJECTS (1 << SL_OBJECTS)
++#define SO_TOTAL (1 << SL_TOTAL)
++
++static ssize_t show_slab_objects(struct kmem_cache *s,
++ char *buf, unsigned long flags)
++{
++ unsigned long total = 0;
++ int node;
++ int x;
++ unsigned long *nodes;
++
++ nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
++ if (!nodes)
++ return -ENOMEM;
++
++ if (flags & SO_CPU) {
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
++ cpu);
++ int node;
++ struct page *page;
++
++ page = READ_ONCE(c->page);
++ if (!page)
++ continue;
++
++ node = page_to_nid(page);
++ if (flags & SO_TOTAL)
++ x = page->objects;
++ else if (flags & SO_OBJECTS)
++ x = page->inuse;
++ else
++ x = 1;
++
++ total += x;
++ nodes[node] += x;
++
++ page = READ_ONCE(c->partial);
++ if (page) {
++ node = page_to_nid(page);
++ if (flags & SO_TOTAL)
++ WARN_ON_ONCE(1);
++ else if (flags & SO_OBJECTS)
++ WARN_ON_ONCE(1);
++ else
++ x = page->pages;
++ total += x;
++ nodes[node] += x;
++ }
++ }
++ }
++
++ get_online_mems();
++#ifdef CONFIG_SLUB_DEBUG
++ if (flags & SO_ALL) {
++ struct kmem_cache_node *n;
++
++ for_each_kmem_cache_node(s, node, n) {
++
++ if (flags & SO_TOTAL)
++ x = atomic_long_read(&n->total_objects);
++ else if (flags & SO_OBJECTS)
++ x = atomic_long_read(&n->total_objects) -
++ count_partial(n, count_free);
++ else
++ x = atomic_long_read(&n->nr_slabs);
++ total += x;
++ nodes[node] += x;
++ }
++
++ } else
++#endif
++ if (flags & SO_PARTIAL) {
++ struct kmem_cache_node *n;
++
++ for_each_kmem_cache_node(s, node, n) {
++ if (flags & SO_TOTAL)
++ x = count_partial(n, count_total);
++ else if (flags & SO_OBJECTS)
++ x = count_partial(n, count_inuse);
++ else
++ x = n->nr_partial;
++ total += x;
++ nodes[node] += x;
++ }
++ }
++ x = sprintf(buf, "%lu", total);
++#ifdef CONFIG_NUMA
++ for (node = 0; node < nr_node_ids; node++)
++ if (nodes[node])
++ x += sprintf(buf + x, " N%d=%lu",
++ node, nodes[node]);
++#endif
++ put_online_mems();
++ kfree(nodes);
++ return x + sprintf(buf + x, "\n");
++}
++
++#ifdef CONFIG_SLUB_DEBUG
++static int any_slab_objects(struct kmem_cache *s)
++{
++ int node;
++ struct kmem_cache_node *n;
++
++ for_each_kmem_cache_node(s, node, n)
++ if (atomic_long_read(&n->total_objects))
++ return 1;
++
++ return 0;
++}
++#endif
++
++#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
++#define to_slab(n) container_of(n, struct kmem_cache, kobj)
++
++struct slab_attribute {
++ struct attribute attr;
++ ssize_t (*show)(struct kmem_cache *s, char *buf);
++ ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
++};
++
++#define SLAB_ATTR_RO(_name) \
++ static struct slab_attribute _name##_attr = \
++ __ATTR(_name, 0400, _name##_show, NULL)
++
++#define SLAB_ATTR(_name) \
++ static struct slab_attribute _name##_attr = \
++ __ATTR(_name, 0600, _name##_show, _name##_store)
++
++static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", s->size);
++}
++SLAB_ATTR_RO(slab_size);
++
++static ssize_t align_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", s->align);
++}
++SLAB_ATTR_RO(align);
++
++static ssize_t object_size_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", s->object_size);
++}
++SLAB_ATTR_RO(object_size);
++
++static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", oo_objects(s->oo));
++}
++SLAB_ATTR_RO(objs_per_slab);
++
++static ssize_t order_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ unsigned long order;
++ int err;
++
++ err = kstrtoul(buf, 10, &order);
++ if (err)
++ return err;
++
++ if (order > slub_max_order || order < slub_min_order)
++ return -EINVAL;
++
++ calculate_sizes(s, order);
++ return length;
++}
++
++static ssize_t order_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", oo_order(s->oo));
++}
++SLAB_ATTR(order);
++
++static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%lu\n", s->min_partial);
++}
++
++static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
++ size_t length)
++{
++ unsigned long min;
++ int err;
++
++ err = kstrtoul(buf, 10, &min);
++ if (err)
++ return err;
++
++ set_min_partial(s, min);
++ return length;
++}
++SLAB_ATTR(min_partial);
++
++static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%u\n", s->cpu_partial);
++}
++
++static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
++ size_t length)
++{
++ unsigned long objects;
++ int err;
++
++ err = kstrtoul(buf, 10, &objects);
++ if (err)
++ return err;
++ if (objects && !kmem_cache_has_cpu_partial(s))
++ return -EINVAL;
++
++ s->cpu_partial = objects;
++ flush_all(s);
++ return length;
++}
++SLAB_ATTR(cpu_partial);
++
++static ssize_t ctor_show(struct kmem_cache *s, char *buf)
++{
++ if (!s->ctor)
++ return 0;
++ return sprintf(buf, "%pS\n", s->ctor);
++}
++SLAB_ATTR_RO(ctor);
++
++static ssize_t aliases_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
++}
++SLAB_ATTR_RO(aliases);
++
++static ssize_t partial_show(struct kmem_cache *s, char *buf)
++{
++ return show_slab_objects(s, buf, SO_PARTIAL);
++}
++SLAB_ATTR_RO(partial);
++
++static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
++{
++ return show_slab_objects(s, buf, SO_CPU);
++}
++SLAB_ATTR_RO(cpu_slabs);
++
++static ssize_t objects_show(struct kmem_cache *s, char *buf)
++{
++ return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
++}
++SLAB_ATTR_RO(objects);
++
++static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
++{
++ return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
++}
++SLAB_ATTR_RO(objects_partial);
++
++static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
++{
++ int objects = 0;
++ int pages = 0;
++ int cpu;
++ int len;
++
++ for_each_online_cpu(cpu) {
++ struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
++
++ if (page) {
++ pages += page->pages;
++ objects += page->pobjects;
++ }
++ }
++
++ len = sprintf(buf, "%d(%d)", objects, pages);
++
++#ifdef CONFIG_SMP
++ for_each_online_cpu(cpu) {
++ struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
++
++ if (page && len < PAGE_SIZE - 20)
++ len += sprintf(buf + len, " C%d=%d(%d)", cpu,
++ page->pobjects, page->pages);
++ }
++#endif
++ return len + sprintf(buf + len, "\n");
++}
++SLAB_ATTR_RO(slabs_cpu_partial);
++
++static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
++}
++
++static ssize_t reclaim_account_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ s->flags &= ~SLAB_RECLAIM_ACCOUNT;
++ if (buf[0] == '1')
++ s->flags |= SLAB_RECLAIM_ACCOUNT;
++ return length;
++}
++SLAB_ATTR(reclaim_account);
++
++static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
++}
++SLAB_ATTR_RO(hwcache_align);
++
++#ifdef CONFIG_ZONE_DMA
++static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
++}
++SLAB_ATTR_RO(cache_dma);
++#endif
++
++static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
++}
++SLAB_ATTR_RO(destroy_by_rcu);
++
++static ssize_t reserved_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", s->reserved);
++}
++SLAB_ATTR_RO(reserved);
++
++#ifdef CONFIG_SLUB_DEBUG
++static ssize_t slabs_show(struct kmem_cache *s, char *buf)
++{
++ return show_slab_objects(s, buf, SO_ALL);
++}
++SLAB_ATTR_RO(slabs);
++
++static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
++{
++ return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
++}
++SLAB_ATTR_RO(total_objects);
++
++static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
++}
++
++static ssize_t sanity_checks_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ s->flags &= ~SLAB_DEBUG_FREE;
++ if (buf[0] == '1') {
++ s->flags &= ~__CMPXCHG_DOUBLE;
++ s->flags |= SLAB_DEBUG_FREE;
++ }
++ return length;
++}
++SLAB_ATTR(sanity_checks);
++
++static ssize_t trace_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
++}
++
++static ssize_t trace_store(struct kmem_cache *s, const char *buf,
++ size_t length)
++{
++ /*
++ * Tracing a merged cache is going to give confusing results
++ * as well as cause other issues like converting a mergeable
++ * cache into an unmergeable one.
++ */
++ if (s->refcount > 1)
++ return -EINVAL;
++
++ s->flags &= ~SLAB_TRACE;
++ if (buf[0] == '1') {
++ s->flags &= ~__CMPXCHG_DOUBLE;
++ s->flags |= SLAB_TRACE;
++ }
++ return length;
++}
++SLAB_ATTR(trace);
++
++static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
++}
++
++static ssize_t red_zone_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ if (any_slab_objects(s))
++ return -EBUSY;
++
++ s->flags &= ~SLAB_RED_ZONE;
++ if (buf[0] == '1') {
++ s->flags &= ~__CMPXCHG_DOUBLE;
++ s->flags |= SLAB_RED_ZONE;
++ }
++ calculate_sizes(s, -1);
++ return length;
++}
++SLAB_ATTR(red_zone);
++
++static ssize_t poison_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
++}
++
++static ssize_t poison_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ if (any_slab_objects(s))
++ return -EBUSY;
++
++ s->flags &= ~SLAB_POISON;
++ if (buf[0] == '1') {
++ s->flags &= ~__CMPXCHG_DOUBLE;
++ s->flags |= SLAB_POISON;
++ }
++ calculate_sizes(s, -1);
++ return length;
++}
++SLAB_ATTR(poison);
++
++static ssize_t store_user_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
++}
++
++static ssize_t store_user_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ if (any_slab_objects(s))
++ return -EBUSY;
++
++ s->flags &= ~SLAB_STORE_USER;
++ if (buf[0] == '1') {
++ s->flags &= ~__CMPXCHG_DOUBLE;
++ s->flags |= SLAB_STORE_USER;
++ }
++ calculate_sizes(s, -1);
++ return length;
++}
++SLAB_ATTR(store_user);
++
++static ssize_t validate_show(struct kmem_cache *s, char *buf)
++{
++ return 0;
++}
++
++static ssize_t validate_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ int ret = -EINVAL;
++
++ if (buf[0] == '1') {
++ ret = validate_slab_cache(s);
++ if (ret >= 0)
++ ret = length;
++ }
++ return ret;
++}
++SLAB_ATTR(validate);
++
++static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
++{
++ if (!(s->flags & SLAB_STORE_USER))
++ return -ENOSYS;
++ return list_locations(s, buf, TRACK_ALLOC);
++}
++SLAB_ATTR_RO(alloc_calls);
++
++static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
++{
++ if (!(s->flags & SLAB_STORE_USER))
++ return -ENOSYS;
++ return list_locations(s, buf, TRACK_FREE);
++}
++SLAB_ATTR_RO(free_calls);
++#endif /* CONFIG_SLUB_DEBUG */
++
++#ifdef CONFIG_FAILSLAB
++static ssize_t failslab_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
++}
++
++static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
++ size_t length)
++{
++ if (s->refcount > 1)
++ return -EINVAL;
++
++ s->flags &= ~SLAB_FAILSLAB;
++ if (buf[0] == '1')
++ s->flags |= SLAB_FAILSLAB;
++ return length;
++}
++SLAB_ATTR(failslab);
++#endif
++
++static ssize_t shrink_show(struct kmem_cache *s, char *buf)
++{
++ return 0;
++}
++
++static ssize_t shrink_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ if (buf[0] == '1')
++ kmem_cache_shrink(s);
++ else
++ return -EINVAL;
++ return length;
++}
++SLAB_ATTR(shrink);
++
++#ifdef CONFIG_NUMA
++static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
++{
++ return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
++}
++
++static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
++ const char *buf, size_t length)
++{
++ unsigned long ratio;
++ int err;
++
++ err = kstrtoul(buf, 10, &ratio);
++ if (err)
++ return err;
++
++ if (ratio <= 100)
++ s->remote_node_defrag_ratio = ratio * 10;
++
++ return length;
++}
++SLAB_ATTR(remote_node_defrag_ratio);
++#endif
++
++#ifdef CONFIG_SLUB_STATS
++static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
++{
++ unsigned long sum = 0;
++ int cpu;
++ int len;
++ int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
++
++ if (!data)
++ return -ENOMEM;
++
++ for_each_online_cpu(cpu) {
++ unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
++
++ data[cpu] = x;
++ sum += x;
++ }
++
++ len = sprintf(buf, "%lu", sum);
++
++#ifdef CONFIG_SMP
++ for_each_online_cpu(cpu) {
++ if (data[cpu] && len < PAGE_SIZE - 20)
++ len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
++ }
++#endif
++ kfree(data);
++ return len + sprintf(buf + len, "\n");
++}
++
++static void clear_stat(struct kmem_cache *s, enum stat_item si)
++{
++ int cpu;
++
++ for_each_online_cpu(cpu)
++ per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
++}
++
++#define STAT_ATTR(si, text) \
++static ssize_t text##_show(struct kmem_cache *s, char *buf) \
++{ \
++ return show_stat(s, buf, si); \
++} \
++static ssize_t text##_store(struct kmem_cache *s, \
++ const char *buf, size_t length) \
++{ \
++ if (buf[0] != '0') \
++ return -EINVAL; \
++ clear_stat(s, si); \
++ return length; \
++} \
++SLAB_ATTR(text); \
++
++STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
++STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
++STAT_ATTR(FREE_FASTPATH, free_fastpath);
++STAT_ATTR(FREE_SLOWPATH, free_slowpath);
++STAT_ATTR(FREE_FROZEN, free_frozen);
++STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
++STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
++STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
++STAT_ATTR(ALLOC_SLAB, alloc_slab);
++STAT_ATTR(ALLOC_REFILL, alloc_refill);
++STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
++STAT_ATTR(FREE_SLAB, free_slab);
++STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
++STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
++STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
++STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
++STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
++STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
++STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
++STAT_ATTR(ORDER_FALLBACK, order_fallback);
++STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
++STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
++STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
++STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
++STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
++STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
++#endif
++
++static struct attribute *slab_attrs[] = {
++ &slab_size_attr.attr,
++ &object_size_attr.attr,
++ &objs_per_slab_attr.attr,
++ &order_attr.attr,
++ &min_partial_attr.attr,
++ &cpu_partial_attr.attr,
++ &objects_attr.attr,
++ &objects_partial_attr.attr,
++ &partial_attr.attr,
++ &cpu_slabs_attr.attr,
++ &ctor_attr.attr,
++ &aliases_attr.attr,
++ &align_attr.attr,
++ &hwcache_align_attr.attr,
++ &reclaim_account_attr.attr,
++ &destroy_by_rcu_attr.attr,
++ &shrink_attr.attr,
++ &reserved_attr.attr,
++ &slabs_cpu_partial_attr.attr,
++#ifdef CONFIG_SLUB_DEBUG
++ &total_objects_attr.attr,
++ &slabs_attr.attr,
++ &sanity_checks_attr.attr,
++ &trace_attr.attr,
++ &red_zone_attr.attr,
++ &poison_attr.attr,
++ &store_user_attr.attr,
++ &validate_attr.attr,
++ &alloc_calls_attr.attr,
++ &free_calls_attr.attr,
++#endif
++#ifdef CONFIG_ZONE_DMA
++ &cache_dma_attr.attr,
++#endif
++#ifdef CONFIG_NUMA
++ &remote_node_defrag_ratio_attr.attr,
++#endif
++#ifdef CONFIG_SLUB_STATS
++ &alloc_fastpath_attr.attr,
++ &alloc_slowpath_attr.attr,
++ &free_fastpath_attr.attr,
++ &free_slowpath_attr.attr,
++ &free_frozen_attr.attr,
++ &free_add_partial_attr.attr,
++ &free_remove_partial_attr.attr,
++ &alloc_from_partial_attr.attr,
++ &alloc_slab_attr.attr,
++ &alloc_refill_attr.attr,
++ &alloc_node_mismatch_attr.attr,
++ &free_slab_attr.attr,
++ &cpuslab_flush_attr.attr,
++ &deactivate_full_attr.attr,
++ &deactivate_empty_attr.attr,
++ &deactivate_to_head_attr.attr,
++ &deactivate_to_tail_attr.attr,
++ &deactivate_remote_frees_attr.attr,
++ &deactivate_bypass_attr.attr,
++ &order_fallback_attr.attr,
++ &cmpxchg_double_fail_attr.attr,
++ &cmpxchg_double_cpu_fail_attr.attr,
++ &cpu_partial_alloc_attr.attr,
++ &cpu_partial_free_attr.attr,
++ &cpu_partial_node_attr.attr,
++ &cpu_partial_drain_attr.attr,
++#endif
++#ifdef CONFIG_FAILSLAB
++ &failslab_attr.attr,
++#endif
++
++ NULL
++};
++
++static struct attribute_group slab_attr_group = {
++ .attrs = slab_attrs,
++};
++
++static ssize_t slab_attr_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buf)
++{
++ struct slab_attribute *attribute;
++ struct kmem_cache *s;
++ int err;
++
++ attribute = to_slab_attr(attr);
++ s = to_slab(kobj);
++
++ if (!attribute->show)
++ return -EIO;
++
++ err = attribute->show(s, buf);
++
++ return err;
++}
++
++static ssize_t slab_attr_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buf, size_t len)
++{
++ struct slab_attribute *attribute;
++ struct kmem_cache *s;
++ int err;
++
++ attribute = to_slab_attr(attr);
++ s = to_slab(kobj);
++
++ if (!attribute->store)
++ return -EIO;
++
++ err = attribute->store(s, buf, len);
++#ifdef CONFIG_MEMCG_KMEM
++ if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
++ struct kmem_cache *c;
++
++ mutex_lock(&slab_mutex);
++ if (s->max_attr_size < len)
++ s->max_attr_size = len;
++
++ /*
++ * This is a best effort propagation, so this function's return
++ * value will be determined by the parent cache only. This is
++ * basically because not all attributes will have a well
++ * defined semantics for rollbacks - most of the actions will
++ * have permanent effects.
++ *
++ * Returning the error value of any of the children that fail
++ * is not 100 % defined, in the sense that users seeing the
++ * error code won't be able to know anything about the state of
++ * the cache.
++ *
++ * Only returning the error code for the parent cache at least
++ * has well defined semantics. The cache being written to
++ * directly either failed or succeeded, in which case we loop
++ * through the descendants with best-effort propagation.
++ */
++ for_each_memcg_cache(c, s)
++ attribute->store(c, buf, len);
++ mutex_unlock(&slab_mutex);
++ }
++#endif
++ return err;
++}
++
++static void memcg_propagate_slab_attrs(struct kmem_cache *s)
++{
++#ifdef CONFIG_MEMCG_KMEM
++ int i;
++ char *buffer = NULL;
++ struct kmem_cache *root_cache;
++
++ if (is_root_cache(s))
++ return;
++
++ root_cache = s->memcg_params.root_cache;
++
++ /*
++ * This mean this cache had no attribute written. Therefore, no point
++ * in copying default values around
++ */
++ if (!root_cache->max_attr_size)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
++ char mbuf[64];
++ char *buf;
++ struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
++
++ if (!attr || !attr->store || !attr->show)
++ continue;
++
++ /*
++ * It is really bad that we have to allocate here, so we will
++ * do it only as a fallback. If we actually allocate, though,
++ * we can just use the allocated buffer until the end.
++ *
++ * Most of the slub attributes will tend to be very small in
++ * size, but sysfs allows buffers up to a page, so they can
++ * theoretically happen.
++ */
++ if (buffer)
++ buf = buffer;
++ else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
++ buf = mbuf;
++ else {
++ buffer = (char *) get_zeroed_page(GFP_KERNEL);
++ if (WARN_ON(!buffer))
++ continue;
++ buf = buffer;
++ }
++
++ attr->show(root_cache, buf);
++ attr->store(s, buf, strlen(buf));
++ }
++
++ if (buffer)
++ free_page((unsigned long)buffer);
++#endif
++}
++
++static void kmem_cache_release(struct kobject *k)
++{
++ slab_kmem_cache_release(to_slab(k));
++}
++
++static const struct sysfs_ops slab_sysfs_ops = {
++ .show = slab_attr_show,
++ .store = slab_attr_store,
++};
++
++static struct kobj_type slab_ktype = {
++ .sysfs_ops = &slab_sysfs_ops,
++ .release = kmem_cache_release,
++};
++
++static int uevent_filter(struct kset *kset, struct kobject *kobj)
++{
++ struct kobj_type *ktype = get_ktype(kobj);
++
++ if (ktype == &slab_ktype)
++ return 1;
++ return 0;
++}
++
++static const struct kset_uevent_ops slab_uevent_ops = {
++ .filter = uevent_filter,
++};
++
++static struct kset *slab_kset;
++
++static inline struct kset *cache_kset(struct kmem_cache *s)
++{
++#ifdef CONFIG_MEMCG_KMEM
++ if (!is_root_cache(s))
++ return s->memcg_params.root_cache->memcg_kset;
++#endif
++ return slab_kset;
++}
++
++#define ID_STR_LENGTH 64
++
++/* Create a unique string id for a slab cache:
++ *
++ * Format :[flags-]size
++ */
++static char *create_unique_id(struct kmem_cache *s)
++{
++ char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
++ char *p = name;
++
++ BUG_ON(!name);
++
++ *p++ = ':';
++ /*
++ * First flags affecting slabcache operations. We will only
++ * get here for aliasable slabs so we do not need to support
++ * too many flags. The flags here must cover all flags that
++ * are matched during merging to guarantee that the id is
++ * unique.
++ */
++ if (s->flags & SLAB_CACHE_DMA)
++ *p++ = 'd';
++ if (s->flags & SLAB_RECLAIM_ACCOUNT)
++ *p++ = 'a';
++ if (s->flags & SLAB_DEBUG_FREE)
++ *p++ = 'F';
++ if (!(s->flags & SLAB_NOTRACK))
++ *p++ = 't';
++ if (p != name + 1)
++ *p++ = '-';
++ p += sprintf(p, "%07d", s->size);
++
++ BUG_ON(p > name + ID_STR_LENGTH - 1);
++ return name;
++}
++
++static int sysfs_slab_add(struct kmem_cache *s)
++{
++ int err;
++ const char *name;
++ int unmergeable = slab_unmergeable(s);
++
++ if (unmergeable) {
++ /*
++ * Slabcache can never be merged so we can use the name proper.
++ * This is typically the case for debug situations. In that
++ * case we can catch duplicate names easily.
++ */
++ sysfs_remove_link(&slab_kset->kobj, s->name);
++ name = s->name;
++ } else {
++ /*
++ * Create a unique name for the slab as a target
++ * for the symlinks.
++ */
++ name = create_unique_id(s);
++ }
++
++ s->kobj.kset = cache_kset(s);
++ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
++ if (err)
++ goto out_put_kobj;
++
++ err = sysfs_create_group(&s->kobj, &slab_attr_group);
++ if (err)
++ goto out_del_kobj;
++
++#ifdef CONFIG_MEMCG_KMEM
++ if (is_root_cache(s)) {
++ s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
++ if (!s->memcg_kset) {
++ err = -ENOMEM;
++ goto out_del_kobj;
++ }
++ }
++#endif
++
++ kobject_uevent(&s->kobj, KOBJ_ADD);
++ if (!unmergeable) {
++ /* Setup first alias */
++ sysfs_slab_alias(s, s->name);
++ }
++out:
++ if (!unmergeable)
++ kfree(name);
++ return err;
++out_del_kobj:
++ kobject_del(&s->kobj);
++out_put_kobj:
++ kobject_put(&s->kobj);
++ goto out;
++}
++
++void sysfs_slab_remove(struct kmem_cache *s)
++{
++ if (slab_state < FULL)
++ /*
++ * Sysfs has not been setup yet so no need to remove the
++ * cache from sysfs.
++ */
++ return;
++
++#ifdef CONFIG_MEMCG_KMEM
++ kset_unregister(s->memcg_kset);
++#endif
++ kobject_uevent(&s->kobj, KOBJ_REMOVE);
++ kobject_del(&s->kobj);
++ kobject_put(&s->kobj);
++}
++
++/*
++ * Need to buffer aliases during bootup until sysfs becomes
++ * available lest we lose that information.
++ */
++struct saved_alias {
++ struct kmem_cache *s;
++ const char *name;
++ struct saved_alias *next;
++};
++
++static struct saved_alias *alias_list;
++
++static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
++{
++ struct saved_alias *al;
++
++ if (slab_state == FULL) {
++ /*
++ * If we have a leftover link then remove it.
++ */
++ sysfs_remove_link(&slab_kset->kobj, name);
++ return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
++ }
++
++ al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
++ if (!al)
++ return -ENOMEM;
++
++ al->s = s;
++ al->name = name;
++ al->next = alias_list;
++ alias_list = al;
++ return 0;
++}
++
++static int __init slab_sysfs_init(void)
++{
++ struct kmem_cache *s;
++ int err;
++
++ mutex_lock(&slab_mutex);
++
++ slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
++ if (!slab_kset) {
++ mutex_unlock(&slab_mutex);
++ pr_err("Cannot register slab subsystem.\n");
++ return -ENOSYS;
++ }
++
++ slab_state = FULL;
++
++ list_for_each_entry(s, &slab_caches, list) {
++ err = sysfs_slab_add(s);
++ if (err)
++ pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
++ s->name);
++ }
++
++ while (alias_list) {
++ struct saved_alias *al = alias_list;
++
++ alias_list = alias_list->next;
++ err = sysfs_slab_alias(al->s, al->name);
++ if (err)
++ pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
++ al->name);
++ kfree(al);
++ }
++
++ mutex_unlock(&slab_mutex);
++ resiliency_test();
++ return 0;
++}
++
++__initcall(slab_sysfs_init);
++#endif /* CONFIG_SYSFS */
++
++/*
++ * The /proc/slabinfo ABI
++ */
++#ifdef CONFIG_SLABINFO
++void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
++{
++ unsigned long nr_slabs = 0;
++ unsigned long nr_objs = 0;
++ unsigned long nr_free = 0;
++ int node;
++ struct kmem_cache_node *n;
++
++ for_each_kmem_cache_node(s, node, n) {
++ nr_slabs += node_nr_slabs(n);
++ nr_objs += node_nr_objs(n);
++ nr_free += count_partial(n, count_free);
++ }
++
++ sinfo->active_objs = nr_objs - nr_free;
++ sinfo->num_objs = nr_objs;
++ sinfo->active_slabs = nr_slabs;
++ sinfo->num_slabs = nr_slabs;
++ sinfo->objects_per_slab = oo_objects(s->oo);
++ sinfo->cache_order = oo_order(s->oo);
++}
++
++void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
++{
++}
++
++ssize_t slabinfo_write(struct file *file, const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ return -EIO;
++}
++#endif /* CONFIG_SLABINFO */
+diff -Nur linux-4.1.10.orig/mm/swap.c linux-4.1.10/mm/swap.c
+--- linux-4.1.10.orig/mm/swap.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/swap.c 2015-10-07 18:00:08.000000000 +0200
@@ -32,6 +32,7 @@
#include <linux/gfp.h>
#include <linux/uio.h>
@@ -25787,9 +62050,9 @@ diff -Nur linux-4.1.6.orig/mm/swap.c linux-4.1.6/mm/swap.c
}
static void lru_add_drain_per_cpu(struct work_struct *dummy)
-diff -Nur linux-4.1.6.orig/mm/truncate.c linux-4.1.6/mm/truncate.c
---- linux-4.1.6.orig/mm/truncate.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/truncate.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/truncate.c linux-4.1.10/mm/truncate.c
+--- linux-4.1.10.orig/mm/truncate.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/truncate.c 2015-10-07 18:00:08.000000000 +0200
@@ -56,8 +56,11 @@
* protected by mapping->tree_lock.
*/
@@ -25804,9 +62067,9 @@ diff -Nur linux-4.1.6.orig/mm/truncate.c linux-4.1.6/mm/truncate.c
__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
spin_unlock_irq(&mapping->tree_lock);
-diff -Nur linux-4.1.6.orig/mm/vmalloc.c linux-4.1.6/mm/vmalloc.c
---- linux-4.1.6.orig/mm/vmalloc.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/vmalloc.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/vmalloc.c linux-4.1.10/mm/vmalloc.c
+--- linux-4.1.10.orig/mm/vmalloc.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/vmalloc.c 2015-10-07 18:00:08.000000000 +0200
@@ -819,7 +819,7 @@
struct vmap_block *vb;
struct vmap_area *va;
@@ -25858,9 +62121,9 @@ diff -Nur linux-4.1.6.orig/mm/vmalloc.c linux-4.1.6/mm/vmalloc.c
rcu_read_unlock();
/* Allocate new block if nothing was found */
-diff -Nur linux-4.1.6.orig/mm/vmstat.c linux-4.1.6/mm/vmstat.c
---- linux-4.1.6.orig/mm/vmstat.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/vmstat.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/vmstat.c linux-4.1.10/mm/vmstat.c
+--- linux-4.1.10.orig/mm/vmstat.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/vmstat.c 2015-10-07 18:00:08.000000000 +0200
@@ -226,6 +226,7 @@
long x;
long t;
@@ -25909,9 +62172,9 @@ diff -Nur linux-4.1.6.orig/mm/vmstat.c linux-4.1.6/mm/vmstat.c
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-diff -Nur linux-4.1.6.orig/mm/workingset.c linux-4.1.6/mm/workingset.c
---- linux-4.1.6.orig/mm/workingset.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/mm/workingset.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/mm/workingset.c linux-4.1.10/mm/workingset.c
+--- linux-4.1.10.orig/mm/workingset.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/mm/workingset.c 2015-10-07 18:00:08.000000000 +0200
@@ -264,7 +264,8 @@
* point where they would still be useful.
*/
@@ -25979,9 +62242,9 @@ diff -Nur linux-4.1.6.orig/mm/workingset.c linux-4.1.6/mm/workingset.c
err:
return ret;
}
-diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
---- linux-4.1.6.orig/net/core/dev.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/core/dev.c 2015-09-08 23:49:08.441830131 +0200
+diff -Nur linux-4.1.10.orig/net/core/dev.c linux-4.1.10/net/core/dev.c
+--- linux-4.1.10.orig/net/core/dev.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/core/dev.c 2015-10-07 18:00:08.000000000 +0200
@@ -184,6 +184,7 @@
static DEFINE_HASHTABLE(napi_hash, 8);
@@ -26007,7 +62270,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
#endif
}
-@@ -856,7 +857,8 @@
+@@ -852,7 +853,8 @@
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -26017,7 +62280,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
goto retry;
}
-@@ -1125,20 +1127,17 @@
+@@ -1121,20 +1123,17 @@
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -26044,7 +62307,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1151,11 +1150,12 @@
+@@ -1147,11 +1146,12 @@
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -26060,7 +62323,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
netdev_adjacent_rename_links(dev, oldname);
-@@ -1176,7 +1176,8 @@
+@@ -1172,7 +1172,8 @@
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -26070,7 +62333,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1189,6 +1190,11 @@
+@@ -1185,6 +1186,11 @@
}
return err;
@@ -26082,7 +62345,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
/**
-@@ -2218,6 +2224,7 @@
+@@ -2214,6 +2220,7 @@
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -26090,7 +62353,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
void __netif_schedule(struct Qdisc *q)
-@@ -2299,6 +2306,7 @@
+@@ -2295,6 +2302,7 @@
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -26098,7 +62361,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3366,6 +3374,7 @@
+@@ -3365,6 +3373,7 @@
rps_unlock(sd);
local_irq_restore(flags);
@@ -26106,7 +62369,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -3384,7 +3393,7 @@
+@@ -3383,7 +3392,7 @@
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -26115,7 +62378,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3394,13 +3403,13 @@
+@@ -3393,13 +3402,13 @@
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
@@ -26132,7 +62395,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
return ret;
}
-@@ -3434,16 +3443,44 @@
+@@ -3433,16 +3442,44 @@
trace_netif_rx_ni_entry(skb);
@@ -26181,7 +62444,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3485,7 +3522,7 @@
+@@ -3484,7 +3521,7 @@
head = head->next_sched;
root_lock = qdisc_lock(q);
@@ -26190,7 +62453,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
-@@ -3884,7 +3921,7 @@
+@@ -3881,7 +3918,7 @@
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -26199,7 +62462,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
input_queue_head_incr(sd);
}
}
-@@ -3893,10 +3930,13 @@
+@@ -3890,10 +3927,13 @@
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -26214,7 +62477,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -4347,6 +4387,7 @@
+@@ -4344,6 +4384,7 @@
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -26222,7 +62485,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4360,6 +4401,7 @@
+@@ -4357,6 +4398,7 @@
} else
#endif
local_irq_enable();
@@ -26230,7 +62493,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4439,6 +4481,7 @@
+@@ -4438,6 +4480,7 @@
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -26238,7 +62501,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -7168,6 +7211,7 @@
+@@ -7167,6 +7210,7 @@
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
@@ -26246,7 +62509,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
/* Process offline CPU's input_pkt_queue */
while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-@@ -7178,6 +7222,9 @@
+@@ -7177,6 +7221,9 @@
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -26256,7 +62519,7 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
return NOTIFY_OK;
}
-@@ -7479,8 +7526,9 @@
+@@ -7478,8 +7525,9 @@
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
@@ -26268,9 +62531,7535 @@ diff -Nur linux-4.1.6.orig/net/core/dev.c linux-4.1.6/net/core/dev.c
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
-diff -Nur linux-4.1.6.orig/net/core/skbuff.c linux-4.1.6/net/core/skbuff.c
---- linux-4.1.6.orig/net/core/skbuff.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/core/skbuff.c 2015-09-08 23:49:08.445829687 +0200
+diff -Nur linux-4.1.10.orig/net/core/dev.c.orig linux-4.1.10/net/core/dev.c.orig
+--- linux-4.1.10.orig/net/core/dev.c.orig 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.1.10/net/core/dev.c.orig 2015-10-03 13:49:38.000000000 +0200
+@@ -0,0 +1,7522 @@
++/*
++ * NET3 Protocol independent device support routines.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ *
++ * Derived from the non IP parts of dev.c 1.0.19
++ * Authors: Ross Biro
++ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
++ * Mark Evans, <evansmp@uhura.aston.ac.uk>
++ *
++ * Additional Authors:
++ * Florian la Roche <rzsfl@rz.uni-sb.de>
++ * Alan Cox <gw4pts@gw4pts.ampr.org>
++ * David Hinds <dahinds@users.sourceforge.net>
++ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
++ * Adam Sulmicki <adam@cfar.umd.edu>
++ * Pekka Riikonen <priikone@poesidon.pspt.fi>
++ *
++ * Changes:
++ * D.J. Barrow : Fixed bug where dev->refcnt gets set
++ * to 2 if register_netdev gets called
++ * before net_dev_init & also removed a
++ * few lines of code in the process.
++ * Alan Cox : device private ioctl copies fields back.
++ * Alan Cox : Transmit queue code does relevant
++ * stunts to keep the queue safe.
++ * Alan Cox : Fixed double lock.
++ * Alan Cox : Fixed promisc NULL pointer trap
++ * ???????? : Support the full private ioctl range
++ * Alan Cox : Moved ioctl permission check into
++ * drivers
++ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
++ * Alan Cox : 100 backlog just doesn't cut it when
++ * you start doing multicast video 8)
++ * Alan Cox : Rewrote net_bh and list manager.
++ * Alan Cox : Fix ETH_P_ALL echoback lengths.
++ * Alan Cox : Took out transmit every packet pass
++ * Saved a few bytes in the ioctl handler
++ * Alan Cox : Network driver sets packet type before
++ * calling netif_rx. Saves a function
++ * call a packet.
++ * Alan Cox : Hashed net_bh()
++ * Richard Kooijman: Timestamp fixes.
++ * Alan Cox : Wrong field in SIOCGIFDSTADDR
++ * Alan Cox : Device lock protection.
++ * Alan Cox : Fixed nasty side effect of device close
++ * changes.
++ * Rudi Cilibrasi : Pass the right thing to
++ * set_mac_address()
++ * Dave Miller : 32bit quantity for the device lock to
++ * make it work out on a Sparc.
++ * Bjorn Ekwall : Added KERNELD hack.
++ * Alan Cox : Cleaned up the backlog initialise.
++ * Craig Metz : SIOCGIFCONF fix if space for under
++ * 1 device.
++ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
++ * is no device open function.
++ * Andi Kleen : Fix error reporting for SIOCGIFCONF
++ * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
++ * Cyrus Durgin : Cleaned for KMOD
++ * Adam Sulmicki : Bug Fix : Network Device Unload
++ * A network device unload needs to purge
++ * the backlog queue.
++ * Paul Rusty Russell : SIOCSIFNAME
++ * Pekka Riikonen : Netdev boot-time settings code
++ * Andrew Morton : Make unregister_netdevice wait
++ * indefinitely on dev->refcnt
++ * J Hadi Salim : - Backlog queue sampling
++ * - netif_rx() feedback
++ */
++
++#include <asm/uaccess.h>
++#include <linux/bitops.h>
++#include <linux/capability.h>
++#include <linux/cpu.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/hash.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/mutex.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/if_ether.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/notifier.h>
++#include <linux/skbuff.h>
++#include <net/net_namespace.h>
++#include <net/sock.h>
++#include <linux/rtnetlink.h>
++#include <linux/stat.h>
++#include <net/dst.h>
++#include <net/pkt_sched.h>
++#include <net/checksum.h>
++#include <net/xfrm.h>
++#include <linux/highmem.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/netpoll.h>
++#include <linux/rcupdate.h>
++#include <linux/delay.h>
++#include <net/iw_handler.h>
++#include <asm/current.h>
++#include <linux/audit.h>
++#include <linux/dmaengine.h>
++#include <linux/err.h>
++#include <linux/ctype.h>
++#include <linux/if_arp.h>
++#include <linux/if_vlan.h>
++#include <linux/ip.h>
++#include <net/ip.h>
++#include <net/mpls.h>
++#include <linux/ipv6.h>
++#include <linux/in.h>
++#include <linux/jhash.h>
++#include <linux/random.h>
++#include <trace/events/napi.h>
++#include <trace/events/net.h>
++#include <trace/events/skb.h>
++#include <linux/pci.h>
++#include <linux/inetdevice.h>
++#include <linux/cpu_rmap.h>
++#include <linux/static_key.h>
++#include <linux/hashtable.h>
++#include <linux/vmalloc.h>
++#include <linux/if_macvlan.h>
++#include <linux/errqueue.h>
++#include <linux/hrtimer.h>
++
++#include "net-sysfs.h"
++
++/* Instead of increasing this, you should create a hash table. */
++#define MAX_GRO_SKBS 8
++
++/* This should be increased if a protocol with a bigger head is added. */
++#define GRO_MAX_HEAD (MAX_HEADER + 128)
++
++static DEFINE_SPINLOCK(ptype_lock);
++static DEFINE_SPINLOCK(offload_lock);
++struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
++struct list_head ptype_all __read_mostly; /* Taps */
++static struct list_head offload_base __read_mostly;
++
++static int netif_rx_internal(struct sk_buff *skb);
++static int call_netdevice_notifiers_info(unsigned long val,
++ struct net_device *dev,
++ struct netdev_notifier_info *info);
++
++/*
++ * The @dev_base_head list is protected by @dev_base_lock and the rtnl
++ * semaphore.
++ *
++ * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
++ *
++ * Writers must hold the rtnl semaphore while they loop through the
++ * dev_base_head list, and hold dev_base_lock for writing when they do the
++ * actual updates. This allows pure readers to access the list even
++ * while a writer is preparing to update it.
++ *
++ * To put it another way, dev_base_lock is held for writing only to
++ * protect against pure readers; the rtnl semaphore provides the
++ * protection against other writers.
++ *
++ * See, for example usages, register_netdevice() and
++ * unregister_netdevice(), which must be called with the rtnl
++ * semaphore held.
++ */
++DEFINE_RWLOCK(dev_base_lock);
++EXPORT_SYMBOL(dev_base_lock);
++
++/* protects napi_hash addition/deletion and napi_gen_id */
++static DEFINE_SPINLOCK(napi_hash_lock);
++
++static unsigned int napi_gen_id;
++static DEFINE_HASHTABLE(napi_hash, 8);
++
++static seqcount_t devnet_rename_seq;
++
++static inline void dev_base_seq_inc(struct net *net)
++{
++ while (++net->dev_base_seq == 0);
++}
++
++static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
++{
++ unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
++
++ return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
++}
++
++static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
++{
++ return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
++}
++
++static inline void rps_lock(struct softnet_data *sd)
++{
++#ifdef CONFIG_RPS
++ spin_lock(&sd->input_pkt_queue.lock);
++#endif
++}
++
++static inline void rps_unlock(struct softnet_data *sd)
++{
++#ifdef CONFIG_RPS
++ spin_unlock(&sd->input_pkt_queue.lock);
++#endif
++}
++
++/* Device list insertion */
++static void list_netdevice(struct net_device *dev)
++{
++ struct net *net = dev_net(dev);
++
++ ASSERT_RTNL();
++
++ write_lock_bh(&dev_base_lock);
++ list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
++ hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
++ hlist_add_head_rcu(&dev->index_hlist,
++ dev_index_hash(net, dev->ifindex));
++ write_unlock_bh(&dev_base_lock);
++
++ dev_base_seq_inc(net);
++}
++
++/* Device list removal
++ * caller must respect a RCU grace period before freeing/reusing dev
++ */
++static void unlist_netdevice(struct net_device *dev)
++{
++ ASSERT_RTNL();
++
++ /* Unlink dev from the device chain */
++ write_lock_bh(&dev_base_lock);
++ list_del_rcu(&dev->dev_list);
++ hlist_del_rcu(&dev->name_hlist);
++ hlist_del_rcu(&dev->index_hlist);
++ write_unlock_bh(&dev_base_lock);
++
++ dev_base_seq_inc(dev_net(dev));
++}
++
++/*
++ * Our notifier list
++ */
++
++static RAW_NOTIFIER_HEAD(netdev_chain);
++
++/*
++ * Device drivers call our routines to queue packets here. We empty the
++ * queue in the local softnet handler.
++ */
++
++DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
++EXPORT_PER_CPU_SYMBOL(softnet_data);
++
++#ifdef CONFIG_LOCKDEP
++/*
++ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
++ * according to dev->type
++ */
++static const unsigned short netdev_lock_type[] =
++ {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
++ ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
++ ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
++ ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
++ ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
++ ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
++ ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
++ ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
++ ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
++ ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
++ ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
++ ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
++ ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
++ ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
++ ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
++
++static const char *const netdev_lock_name[] =
++ {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
++ "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
++ "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
++ "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
++ "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
++ "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
++ "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
++ "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
++ "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
++ "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
++ "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
++ "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
++ "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
++ "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
++ "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
++
++static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
++static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
++
++static inline unsigned short netdev_lock_pos(unsigned short dev_type)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
++ if (netdev_lock_type[i] == dev_type)
++ return i;
++ /* the last key is used by default */
++ return ARRAY_SIZE(netdev_lock_type) - 1;
++}
++
++static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
++ unsigned short dev_type)
++{
++ int i;
++
++ i = netdev_lock_pos(dev_type);
++ lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
++ netdev_lock_name[i]);
++}
++
++static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
++{
++ int i;
++
++ i = netdev_lock_pos(dev->type);
++ lockdep_set_class_and_name(&dev->addr_list_lock,
++ &netdev_addr_lock_key[i],
++ netdev_lock_name[i]);
++}
++#else
++static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
++ unsigned short dev_type)
++{
++}
++static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
++{
++}
++#endif
++
++/*******************************************************************************
++
++ Protocol management and registration routines
++
++*******************************************************************************/
++
++/*
++ * Add a protocol ID to the list. Now that the input handler is
++ * smarter we can dispense with all the messy stuff that used to be
++ * here.
++ *
++ * BEWARE!!! Protocol handlers, mangling input packets,
++ * MUST BE last in hash buckets and checking protocol handlers
++ * MUST start from promiscuous ptype_all chain in net_bh.
++ * It is true now, do not change it.
++ * Explanation follows: if protocol handler, mangling packet, will
++ * be the first on list, it is not able to sense, that packet
++ * is cloned and should be copied-on-write, so that it will
++ * change it and subsequent readers will get broken packet.
++ * --ANK (980803)
++ */
++
++static inline struct list_head *ptype_head(const struct packet_type *pt)
++{
++ if (pt->type == htons(ETH_P_ALL))
++ return pt->dev ? &pt->dev->ptype_all : &ptype_all;
++ else
++ return pt->dev ? &pt->dev->ptype_specific :
++ &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
++}
++
++/**
++ * dev_add_pack - add packet handler
++ * @pt: packet type declaration
++ *
++ * Add a protocol handler to the networking stack. The passed &packet_type
++ * is linked into kernel lists and may not be freed until it has been
++ * removed from the kernel lists.
++ *
++ * This call does not sleep therefore it can not
++ * guarantee all CPU's that are in middle of receiving packets
++ * will see the new packet type (until the next received packet).
++ */
++
++void dev_add_pack(struct packet_type *pt)
++{
++ struct list_head *head = ptype_head(pt);
++
++ spin_lock(&ptype_lock);
++ list_add_rcu(&pt->list, head);
++ spin_unlock(&ptype_lock);
++}
++EXPORT_SYMBOL(dev_add_pack);
++
++/**
++ * __dev_remove_pack - remove packet handler
++ * @pt: packet type declaration
++ *
++ * Remove a protocol handler that was previously added to the kernel
++ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
++ * from the kernel lists and can be freed or reused once this function
++ * returns.
++ *
++ * The packet type might still be in use by receivers
++ * and must not be freed until after all the CPU's have gone
++ * through a quiescent state.
++ */
++void __dev_remove_pack(struct packet_type *pt)
++{
++ struct list_head *head = ptype_head(pt);
++ struct packet_type *pt1;
++
++ spin_lock(&ptype_lock);
++
++ list_for_each_entry(pt1, head, list) {
++ if (pt == pt1) {
++ list_del_rcu(&pt->list);
++ goto out;
++ }
++ }
++
++ pr_warn("dev_remove_pack: %p not found\n", pt);
++out:
++ spin_unlock(&ptype_lock);
++}
++EXPORT_SYMBOL(__dev_remove_pack);
++
++/**
++ * dev_remove_pack - remove packet handler
++ * @pt: packet type declaration
++ *
++ * Remove a protocol handler that was previously added to the kernel
++ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
++ * from the kernel lists and can be freed or reused once this function
++ * returns.
++ *
++ * This call sleeps to guarantee that no CPU is looking at the packet
++ * type after return.
++ */
++void dev_remove_pack(struct packet_type *pt)
++{
++ __dev_remove_pack(pt);
++
++ synchronize_net();
++}
++EXPORT_SYMBOL(dev_remove_pack);
++
++
++/**
++ * dev_add_offload - register offload handlers
++ * @po: protocol offload declaration
++ *
++ * Add protocol offload handlers to the networking stack. The passed
++ * &proto_offload is linked into kernel lists and may not be freed until
++ * it has been removed from the kernel lists.
++ *
++ * This call does not sleep therefore it can not
++ * guarantee all CPU's that are in middle of receiving packets
++ * will see the new offload handlers (until the next received packet).
++ */
++void dev_add_offload(struct packet_offload *po)
++{
++ struct list_head *head = &offload_base;
++
++ spin_lock(&offload_lock);
++ list_add_rcu(&po->list, head);
++ spin_unlock(&offload_lock);
++}
++EXPORT_SYMBOL(dev_add_offload);
++
++/**
++ * __dev_remove_offload - remove offload handler
++ * @po: packet offload declaration
++ *
++ * Remove a protocol offload handler that was previously added to the
++ * kernel offload handlers by dev_add_offload(). The passed &offload_type
++ * is removed from the kernel lists and can be freed or reused once this
++ * function returns.
++ *
++ * The packet type might still be in use by receivers
++ * and must not be freed until after all the CPU's have gone
++ * through a quiescent state.
++ */
++static void __dev_remove_offload(struct packet_offload *po)
++{
++ struct list_head *head = &offload_base;
++ struct packet_offload *po1;
++
++ spin_lock(&offload_lock);
++
++ list_for_each_entry(po1, head, list) {
++ if (po == po1) {
++ list_del_rcu(&po->list);
++ goto out;
++ }
++ }
++
++ pr_warn("dev_remove_offload: %p not found\n", po);
++out:
++ spin_unlock(&offload_lock);
++}
++
++/**
++ * dev_remove_offload - remove packet offload handler
++ * @po: packet offload declaration
++ *
++ * Remove a packet offload handler that was previously added to the kernel
++ * offload handlers by dev_add_offload(). The passed &offload_type is
++ * removed from the kernel lists and can be freed or reused once this
++ * function returns.
++ *
++ * This call sleeps to guarantee that no CPU is looking at the packet
++ * type after return.
++ */
++void dev_remove_offload(struct packet_offload *po)
++{
++ __dev_remove_offload(po);
++
++ synchronize_net();
++}
++EXPORT_SYMBOL(dev_remove_offload);
++
++/******************************************************************************
++
++ Device Boot-time Settings Routines
++
++*******************************************************************************/
++
++/* Boot time configuration table */
++static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
++
++/**
++ * netdev_boot_setup_add - add new setup entry
++ * @name: name of the device
++ * @map: configured settings for the device
++ *
++ * Adds new setup entry to the dev_boot_setup list. The function
++ * returns 0 on error and 1 on success. This is a generic routine to
++ * all netdevices.
++ */
++static int netdev_boot_setup_add(char *name, struct ifmap *map)
++{
++ struct netdev_boot_setup *s;
++ int i;
++
++ s = dev_boot_setup;
++ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
++ if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
++ memset(s[i].name, 0, sizeof(s[i].name));
++ strlcpy(s[i].name, name, IFNAMSIZ);
++ memcpy(&s[i].map, map, sizeof(s[i].map));
++ break;
++ }
++ }
++
++ return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
++}
++
++/**
++ * netdev_boot_setup_check - check boot time settings
++ * @dev: the netdevice
++ *
++ * Check boot time settings for the device.
++ * The found settings are set for the device to be used
++ * later in the device probing.
++ * Returns 0 if no settings found, 1 if they are.
++ */
++int netdev_boot_setup_check(struct net_device *dev)
++{
++ struct netdev_boot_setup *s = dev_boot_setup;
++ int i;
++
++ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
++ if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
++ !strcmp(dev->name, s[i].name)) {
++ dev->irq = s[i].map.irq;
++ dev->base_addr = s[i].map.base_addr;
++ dev->mem_start = s[i].map.mem_start;
++ dev->mem_end = s[i].map.mem_end;
++ return 1;
++ }
++ }
++ return 0;
++}
++EXPORT_SYMBOL(netdev_boot_setup_check);
++
++
++/**
++ * netdev_boot_base - get address from boot time settings
++ * @prefix: prefix for network device
++ * @unit: id for network device
++ *
++ * Check boot time settings for the base address of device.
++ * The found settings are set for the device to be used
++ * later in the device probing.
++ * Returns 0 if no settings found.
++ */
++unsigned long netdev_boot_base(const char *prefix, int unit)
++{
++ const struct netdev_boot_setup *s = dev_boot_setup;
++ char name[IFNAMSIZ];
++ int i;
++
++ sprintf(name, "%s%d", prefix, unit);
++
++ /*
++ * If device already registered then return base of 1
++ * to indicate not to probe for this interface
++ */
++ if (__dev_get_by_name(&init_net, name))
++ return 1;
++
++ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
++ if (!strcmp(name, s[i].name))
++ return s[i].map.base_addr;
++ return 0;
++}
++
++/*
++ * Saves at boot time configured settings for any netdevice.
++ */
++int __init netdev_boot_setup(char *str)
++{
++ int ints[5];
++ struct ifmap map;
++
++ str = get_options(str, ARRAY_SIZE(ints), ints);
++ if (!str || !*str)
++ return 0;
++
++ /* Save settings */
++ memset(&map, 0, sizeof(map));
++ if (ints[0] > 0)
++ map.irq = ints[1];
++ if (ints[0] > 1)
++ map.base_addr = ints[2];
++ if (ints[0] > 2)
++ map.mem_start = ints[3];
++ if (ints[0] > 3)
++ map.mem_end = ints[4];
++
++ /* Add new entry to the list */
++ return netdev_boot_setup_add(str, &map);
++}
++
++__setup("netdev=", netdev_boot_setup);
++
++/*******************************************************************************
++
++ Device Interface Subroutines
++
++*******************************************************************************/
++
++/**
++ * dev_get_iflink - get 'iflink' value of a interface
++ * @dev: targeted interface
++ *
++ * Indicates the ifindex the interface is linked to.
++ * Physical interfaces have the same 'ifindex' and 'iflink' values.
++ */
++
++int dev_get_iflink(const struct net_device *dev)
++{
++ if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
++ return dev->netdev_ops->ndo_get_iflink(dev);
++
++ return dev->ifindex;
++}
++EXPORT_SYMBOL(dev_get_iflink);
++
++/**
++ * __dev_get_by_name - find a device by its name
++ * @net: the applicable net namespace
++ * @name: name to find
++ *
++ * Find an interface by name. Must be called under RTNL semaphore
++ * or @dev_base_lock. If the name is found a pointer to the device
++ * is returned. If the name is not found then %NULL is returned. The
++ * reference counters are not incremented so the caller must be
++ * careful with locks.
++ */
++
++struct net_device *__dev_get_by_name(struct net *net, const char *name)
++{
++ struct net_device *dev;
++ struct hlist_head *head = dev_name_hash(net, name);
++
++ hlist_for_each_entry(dev, head, name_hlist)
++ if (!strncmp(dev->name, name, IFNAMSIZ))
++ return dev;
++
++ return NULL;
++}
++EXPORT_SYMBOL(__dev_get_by_name);
++
++/**
++ * dev_get_by_name_rcu - find a device by its name
++ * @net: the applicable net namespace
++ * @name: name to find
++ *
++ * Find an interface by name.
++ * If the name is found a pointer to the device is returned.
++ * If the name is not found then %NULL is returned.
++ * The reference counters are not incremented so the caller must be
++ * careful with locks. The caller must hold RCU lock.
++ */
++
++struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
++{
++ struct net_device *dev;
++ struct hlist_head *head = dev_name_hash(net, name);
++
++ hlist_for_each_entry_rcu(dev, head, name_hlist)
++ if (!strncmp(dev->name, name, IFNAMSIZ))
++ return dev;
++
++ return NULL;
++}
++EXPORT_SYMBOL(dev_get_by_name_rcu);
++
++/**
++ * dev_get_by_name - find a device by its name
++ * @net: the applicable net namespace
++ * @name: name to find
++ *
++ * Find an interface by name. This can be called from any
++ * context and does its own locking. The returned handle has
++ * the usage count incremented and the caller must use dev_put() to
++ * release it when it is no longer needed. %NULL is returned if no
++ * matching device is found.
++ */
++
++struct net_device *dev_get_by_name(struct net *net, const char *name)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = dev_get_by_name_rcu(net, name);
++ if (dev)
++ dev_hold(dev);
++ rcu_read_unlock();
++ return dev;
++}
++EXPORT_SYMBOL(dev_get_by_name);
++
++/**
++ * __dev_get_by_index - find a device by its ifindex
++ * @net: the applicable net namespace
++ * @ifindex: index of device
++ *
++ * Search for an interface by index. Returns %NULL if the device
++ * is not found or a pointer to the device. The device has not
++ * had its reference counter increased so the caller must be careful
++ * about locking. The caller must hold either the RTNL semaphore
++ * or @dev_base_lock.
++ */
++
++struct net_device *__dev_get_by_index(struct net *net, int ifindex)
++{
++ struct net_device *dev;
++ struct hlist_head *head = dev_index_hash(net, ifindex);
++
++ hlist_for_each_entry(dev, head, index_hlist)
++ if (dev->ifindex == ifindex)
++ return dev;
++
++ return NULL;
++}
++EXPORT_SYMBOL(__dev_get_by_index);
++
++/**
++ * dev_get_by_index_rcu - find a device by its ifindex
++ * @net: the applicable net namespace
++ * @ifindex: index of device
++ *
++ * Search for an interface by index. Returns %NULL if the device
++ * is not found or a pointer to the device. The device has not
++ * had its reference counter increased so the caller must be careful
++ * about locking. The caller must hold RCU lock.
++ */
++
++struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
++{
++ struct net_device *dev;
++ struct hlist_head *head = dev_index_hash(net, ifindex);
++
++ hlist_for_each_entry_rcu(dev, head, index_hlist)
++ if (dev->ifindex == ifindex)
++ return dev;
++
++ return NULL;
++}
++EXPORT_SYMBOL(dev_get_by_index_rcu);
++
++
++/**
++ * dev_get_by_index - find a device by its ifindex
++ * @net: the applicable net namespace
++ * @ifindex: index of device
++ *
++ * Search for an interface by index. Returns NULL if the device
++ * is not found or a pointer to the device. The device returned has
++ * had a reference added and the pointer is safe until the user calls
++ * dev_put to indicate they have finished with it.
++ */
++
++struct net_device *dev_get_by_index(struct net *net, int ifindex)
++{
++ struct net_device *dev;
++
++ rcu_read_lock();
++ dev = dev_get_by_index_rcu(net, ifindex);
++ if (dev)
++ dev_hold(dev);
++ rcu_read_unlock();
++ return dev;
++}
++EXPORT_SYMBOL(dev_get_by_index);
++
++/**
++ * netdev_get_name - get a netdevice name, knowing its ifindex.
++ * @net: network namespace
++ * @name: a pointer to the buffer where the name will be stored.
++ * @ifindex: the ifindex of the interface to get the name from.
++ *
++ * The use of raw_seqcount_begin() and cond_resched() before
++ * retrying is required as we want to give the writers a chance
++ * to complete when CONFIG_PREEMPT is not set.
++ */
++int netdev_get_name(struct net *net, char *name, int ifindex)
++{
++ struct net_device *dev;
++ unsigned int seq;
++
++retry:
++ seq = raw_seqcount_begin(&devnet_rename_seq);
++ rcu_read_lock();
++ dev = dev_get_by_index_rcu(net, ifindex);
++ if (!dev) {
++ rcu_read_unlock();
++ return -ENODEV;
++ }
++
++ strcpy(name, dev->name);
++ rcu_read_unlock();
++ if (read_seqcount_retry(&devnet_rename_seq, seq)) {
++ cond_resched();
++ goto retry;
++ }
++
++ return 0;
++}
++
++/**
++ * dev_getbyhwaddr_rcu - find a device by its hardware address
++ * @net: the applicable net namespace
++ * @type: media type of device
++ * @ha: hardware address
++ *
++ * Search for an interface by MAC address. Returns NULL if the device
++ * is not found or a pointer to the device.
++ * The caller must hold RCU or RTNL.
++ * The returned device has not had its ref count increased
++ * and the caller must therefore be careful about locking
++ *
++ */
++
++struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
++ const char *ha)
++{
++ struct net_device *dev;
++
++ for_each_netdev_rcu(net, dev)
++ if (dev->type == type &&
++ !memcmp(dev->dev_addr, ha, dev->addr_len))
++ return dev;
++
++ return NULL;
++}
++EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
++
++struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
++{
++ struct net_device *dev;
++
++ ASSERT_RTNL();
++ for_each_netdev(net, dev)
++ if (dev->type == type)
++ return dev;
++
++ return NULL;
++}
++EXPORT_SYMBOL(__dev_getfirstbyhwtype);
++
++struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
++{
++ struct net_device *dev, *ret = NULL;
++
++ rcu_read_lock();
++ for_each_netdev_rcu(net, dev)
++ if (dev->type == type) {
++ dev_hold(dev);
++ ret = dev;
++ break;
++ }
++ rcu_read_unlock();
++ return ret;
++}
++EXPORT_SYMBOL(dev_getfirstbyhwtype);
++
++/**
++ * __dev_get_by_flags - find any device with given flags
++ * @net: the applicable net namespace
++ * @if_flags: IFF_* values
++ * @mask: bitmask of bits in if_flags to check
++ *
++ * Search for any interface with the given flags. Returns NULL if a device
++ * is not found or a pointer to the device. Must be called inside
++ * rtnl_lock(), and result refcount is unchanged.
++ */
++
++struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
++ unsigned short mask)
++{
++ struct net_device *dev, *ret;
++
++ ASSERT_RTNL();
++
++ ret = NULL;
++ for_each_netdev(net, dev) {
++ if (((dev->flags ^ if_flags) & mask) == 0) {
++ ret = dev;
++ break;
++ }
++ }
++ return ret;
++}
++EXPORT_SYMBOL(__dev_get_by_flags);
++
++/**
++ * dev_valid_name - check if name is okay for network device
++ * @name: name string
++ *
++ * Network device names need to be valid file names to
++ * to allow sysfs to work. We also disallow any kind of
++ * whitespace.
++ */
++bool dev_valid_name(const char *name)
++{
++ if (*name == '\0')
++ return false;
++ if (strlen(name) >= IFNAMSIZ)
++ return false;
++ if (!strcmp(name, ".") || !strcmp(name, ".."))
++ return false;
++
++ while (*name) {
++ if (*name == '/' || *name == ':' || isspace(*name))
++ return false;
++ name++;
++ }
++ return true;
++}
++EXPORT_SYMBOL(dev_valid_name);
++
++/**
++ * __dev_alloc_name - allocate a name for a device
++ * @net: network namespace to allocate the device name in
++ * @name: name format string
++ * @buf: scratch buffer and result name string
++ *
++ * Passed a format string - eg "lt%d" it will try and find a suitable
++ * id. It scans list of devices to build up a free map, then chooses
++ * the first empty slot. The caller must hold the dev_base or rtnl lock
++ * while allocating the name and adding the device in order to avoid
++ * duplicates.
++ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
++ * Returns the number of the unit assigned or a negative errno code.
++ */
++
++static int __dev_alloc_name(struct net *net, const char *name, char *buf)
++{
++ int i = 0;
++ const char *p;
++ const int max_netdevices = 8*PAGE_SIZE;
++ unsigned long *inuse;
++ struct net_device *d;
++
++ p = strnchr(name, IFNAMSIZ-1, '%');
++ if (p) {
++ /*
++ * Verify the string as this thing may have come from
++ * the user. There must be either one "%d" and no other "%"
++ * characters.
++ */
++ if (p[1] != 'd' || strchr(p + 2, '%'))
++ return -EINVAL;
++
++ /* Use one page as a bit array of possible slots */
++ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
++ if (!inuse)
++ return -ENOMEM;
++
++ for_each_netdev(net, d) {
++ if (!sscanf(d->name, name, &i))
++ continue;
++ if (i < 0 || i >= max_netdevices)
++ continue;
++
++ /* avoid cases where sscanf is not exact inverse of printf */
++ snprintf(buf, IFNAMSIZ, name, i);
++ if (!strncmp(buf, d->name, IFNAMSIZ))
++ set_bit(i, inuse);
++ }
++
++ i = find_first_zero_bit(inuse, max_netdevices);
++ free_page((unsigned long) inuse);
++ }
++
++ if (buf != name)
++ snprintf(buf, IFNAMSIZ, name, i);
++ if (!__dev_get_by_name(net, buf))
++ return i;
++
++ /* It is possible to run out of possible slots
++ * when the name is long and there isn't enough space left
++ * for the digits, or if all bits are used.
++ */
++ return -ENFILE;
++}
++
++/**
++ * dev_alloc_name - allocate a name for a device
++ * @dev: device
++ * @name: name format string
++ *
++ * Passed a format string - eg "lt%d" it will try and find a suitable
++ * id. It scans list of devices to build up a free map, then chooses
++ * the first empty slot. The caller must hold the dev_base or rtnl lock
++ * while allocating the name and adding the device in order to avoid
++ * duplicates.
++ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
++ * Returns the number of the unit assigned or a negative errno code.
++ */
++
++int dev_alloc_name(struct net_device *dev, const char *name)
++{
++ char buf[IFNAMSIZ];
++ struct net *net;
++ int ret;
++
++ BUG_ON(!dev_net(dev));
++ net = dev_net(dev);
++ ret = __dev_alloc_name(net, name, buf);
++ if (ret >= 0)
++ strlcpy(dev->name, buf, IFNAMSIZ);
++ return ret;
++}
++EXPORT_SYMBOL(dev_alloc_name);
++
++static int dev_alloc_name_ns(struct net *net,
++ struct net_device *dev,
++ const char *name)
++{
++ char buf[IFNAMSIZ];
++ int ret;
++
++ ret = __dev_alloc_name(net, name, buf);
++ if (ret >= 0)
++ strlcpy(dev->name, buf, IFNAMSIZ);
++ return ret;
++}
++
++static int dev_get_valid_name(struct net *net,
++ struct net_device *dev,
++ const char *name)
++{
++ BUG_ON(!net);
++
++ if (!dev_valid_name(name))
++ return -EINVAL;
++
++ if (strchr(name, '%'))
++ return dev_alloc_name_ns(net, dev, name);
++ else if (__dev_get_by_name(net, name))
++ return -EEXIST;
++ else if (dev->name != name)
++ strlcpy(dev->name, name, IFNAMSIZ);
++
++ return 0;
++}
++
++/**
++ * dev_change_name - change name of a device
++ * @dev: device
++ * @newname: name (or format string) must be at least IFNAMSIZ
++ *
++ * Change name of a device. A format string such as "eth%d" can be
++ * passed for wildcarding.
++ */
++int dev_change_name(struct net_device *dev, const char *newname)
++{
++ unsigned char old_assign_type;
++ char oldname[IFNAMSIZ];
++ int err = 0;
++ int ret;
++ struct net *net;
++
++ ASSERT_RTNL();
++ BUG_ON(!dev_net(dev));
++
++ net = dev_net(dev);
++ if (dev->flags & IFF_UP)
++ return -EBUSY;
++
++ write_seqcount_begin(&devnet_rename_seq);
++
++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
++ write_seqcount_end(&devnet_rename_seq);
++ return 0;
++ }
++
++ memcpy(oldname, dev->name, IFNAMSIZ);
++
++ err = dev_get_valid_name(net, dev, newname);
++ if (err < 0) {
++ write_seqcount_end(&devnet_rename_seq);
++ return err;
++ }
++
++ if (oldname[0] && !strchr(oldname, '%'))
++ netdev_info(dev, "renamed from %s\n", oldname);
++
++ old_assign_type = dev->name_assign_type;
++ dev->name_assign_type = NET_NAME_RENAMED;
++
++rollback:
++ ret = device_rename(&dev->dev, dev->name);
++ if (ret) {
++ memcpy(dev->name, oldname, IFNAMSIZ);
++ dev->name_assign_type = old_assign_type;
++ write_seqcount_end(&devnet_rename_seq);
++ return ret;
++ }
++
++ write_seqcount_end(&devnet_rename_seq);
++
++ netdev_adjacent_rename_links(dev, oldname);
++
++ write_lock_bh(&dev_base_lock);
++ hlist_del_rcu(&dev->name_hlist);
++ write_unlock_bh(&dev_base_lock);
++
++ synchronize_rcu();
++
++ write_lock_bh(&dev_base_lock);
++ hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
++ write_unlock_bh(&dev_base_lock);
++
++ ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
++ ret = notifier_to_errno(ret);
++
++ if (ret) {
++ /* err >= 0 after dev_alloc_name() or stores the first errno */
++ if (err >= 0) {
++ err = ret;
++ write_seqcount_begin(&devnet_rename_seq);
++ memcpy(dev->name, oldname, IFNAMSIZ);
++ memcpy(oldname, newname, IFNAMSIZ);
++ dev->name_assign_type = old_assign_type;
++ old_assign_type = NET_NAME_RENAMED;
++ goto rollback;
++ } else {
++ pr_err("%s: name change rollback failed: %d\n",
++ dev->name, ret);
++ }
++ }
++
++ return err;
++}
++
++/**
++ * dev_set_alias - change ifalias of a device
++ * @dev: device
++ * @alias: name up to IFALIASZ
++ * @len: limit of bytes to copy from info
++ *
++ * Set ifalias for a device.
++ */
++int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
++{
++ char *new_ifalias;
++
++ ASSERT_RTNL();
++
++ if (len >= IFALIASZ)
++ return -EINVAL;
++
++ if (!len) {
++ kfree(dev->ifalias);
++ dev->ifalias = NULL;
++ return 0;
++ }
++
++ new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
++ if (!new_ifalias)
++ return -ENOMEM;
++ dev->ifalias = new_ifalias;
++
++ strlcpy(dev->ifalias, alias, len+1);
++ return len;
++}
++
++
++/**
++ * netdev_features_change - device changes features
++ * @dev: device to cause notification
++ *
++ * Called to indicate a device has changed features.
++ */
++void netdev_features_change(struct net_device *dev)
++{
++ call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
++}
++EXPORT_SYMBOL(netdev_features_change);
++
++/**
++ * netdev_state_change - device changes state
++ * @dev: device to cause notification
++ *
++ * Called to indicate a device has changed state. This function calls
++ * the notifier chains for netdev_chain and sends a NEWLINK message
++ * to the routing socket.
++ */
++void netdev_state_change(struct net_device *dev)
++{
++ if (dev->flags & IFF_UP) {
++ struct netdev_notifier_change_info change_info;
++
++ change_info.flags_changed = 0;
++ call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
++ &change_info.info);
++ rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
++ }
++}
++EXPORT_SYMBOL(netdev_state_change);
++
++/**
++ * netdev_notify_peers - notify network peers about existence of @dev
++ * @dev: network device
++ *
++ * Generate traffic such that interested network peers are aware of
++ * @dev, such as by generating a gratuitous ARP. This may be used when
++ * a device wants to inform the rest of the network about some sort of
++ * reconfiguration such as a failover event or virtual machine
++ * migration.
++ */
++void netdev_notify_peers(struct net_device *dev)
++{
++ rtnl_lock();
++ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
++ rtnl_unlock();
++}
++EXPORT_SYMBOL(netdev_notify_peers);
++
++static int __dev_open(struct net_device *dev)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++ int ret;
++
++ ASSERT_RTNL();
++
++ if (!netif_device_present(dev))
++ return -ENODEV;
++
++ /* Block netpoll from trying to do any rx path servicing.
++ * If we don't do this there is a chance ndo_poll_controller
++ * or ndo_poll may be running while we open the device
++ */
++ netpoll_poll_disable(dev);
++
++ ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
++ ret = notifier_to_errno(ret);
++ if (ret)
++ return ret;
++
++ set_bit(__LINK_STATE_START, &dev->state);
++
++ if (ops->ndo_validate_addr)
++ ret = ops->ndo_validate_addr(dev);
++
++ if (!ret && ops->ndo_open)
++ ret = ops->ndo_open(dev);
++
++ netpoll_poll_enable(dev);
++
++ if (ret)
++ clear_bit(__LINK_STATE_START, &dev->state);
++ else {
++ dev->flags |= IFF_UP;
++ dev_set_rx_mode(dev);
++ dev_activate(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
++ }
++
++ return ret;
++}
++
++/**
++ * dev_open - prepare an interface for use.
++ * @dev: device to open
++ *
++ * Takes a device from down to up state. The device's private open
++ * function is invoked and then the multicast lists are loaded. Finally
++ * the device is moved into the up state and a %NETDEV_UP message is
++ * sent to the netdev notifier chain.
++ *
++ * Calling this function on an active interface is a nop. On a failure
++ * a negative errno code is returned.
++ */
++int dev_open(struct net_device *dev)
++{
++ int ret;
++
++ if (dev->flags & IFF_UP)
++ return 0;
++
++ ret = __dev_open(dev);
++ if (ret < 0)
++ return ret;
++
++ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
++ call_netdevice_notifiers(NETDEV_UP, dev);
++
++ return ret;
++}
++EXPORT_SYMBOL(dev_open);
++
++static int __dev_close_many(struct list_head *head)
++{
++ struct net_device *dev;
++
++ ASSERT_RTNL();
++ might_sleep();
++
++ list_for_each_entry(dev, head, close_list) {
++ /* Temporarily disable netpoll until the interface is down */
++ netpoll_poll_disable(dev);
++
++ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
++
++ clear_bit(__LINK_STATE_START, &dev->state);
++
++ /* Synchronize to scheduled poll. We cannot touch poll list, it
++ * can be even on different cpu. So just clear netif_running().
++ *
++ * dev->stop() will invoke napi_disable() on all of its
++ * napi_struct instances on this device.
++ */
++ smp_mb__after_atomic(); /* Commit netif_running(). */
++ }
++
++ dev_deactivate_many(head);
++
++ list_for_each_entry(dev, head, close_list) {
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ /*
++ * Call the device specific close. This cannot fail.
++ * Only if device is UP
++ *
++ * We allow it to be called even after a DETACH hot-plug
++ * event.
++ */
++ if (ops->ndo_stop)
++ ops->ndo_stop(dev);
++
++ dev->flags &= ~IFF_UP;
++ netpoll_poll_enable(dev);
++ }
++
++ return 0;
++}
++
++static int __dev_close(struct net_device *dev)
++{
++ int retval;
++ LIST_HEAD(single);
++
++ list_add(&dev->close_list, &single);
++ retval = __dev_close_many(&single);
++ list_del(&single);
++
++ return retval;
++}
++
++int dev_close_many(struct list_head *head, bool unlink)
++{
++ struct net_device *dev, *tmp;
++
++ /* Remove the devices that don't need to be closed */
++ list_for_each_entry_safe(dev, tmp, head, close_list)
++ if (!(dev->flags & IFF_UP))
++ list_del_init(&dev->close_list);
++
++ __dev_close_many(head);
++
++ list_for_each_entry_safe(dev, tmp, head, close_list) {
++ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
++ call_netdevice_notifiers(NETDEV_DOWN, dev);
++ if (unlink)
++ list_del_init(&dev->close_list);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(dev_close_many);
++
++/**
++ * dev_close - shutdown an interface.
++ * @dev: device to shutdown
++ *
++ * This function moves an active device into down state. A
++ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
++ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
++ * chain.
++ */
++int dev_close(struct net_device *dev)
++{
++ if (dev->flags & IFF_UP) {
++ LIST_HEAD(single);
++
++ list_add(&dev->close_list, &single);
++ dev_close_many(&single, true);
++ list_del(&single);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(dev_close);
++
++
++/**
++ * dev_disable_lro - disable Large Receive Offload on a device
++ * @dev: device
++ *
++ * Disable Large Receive Offload (LRO) on a net device. Must be
++ * called under RTNL. This is needed if received packets may be
++ * forwarded to another interface.
++ */
++void dev_disable_lro(struct net_device *dev)
++{
++ struct net_device *lower_dev;
++ struct list_head *iter;
++
++ dev->wanted_features &= ~NETIF_F_LRO;
++ netdev_update_features(dev);
++
++ if (unlikely(dev->features & NETIF_F_LRO))
++ netdev_WARN(dev, "failed to disable LRO!\n");
++
++ netdev_for_each_lower_dev(dev, lower_dev, iter)
++ dev_disable_lro(lower_dev);
++}
++EXPORT_SYMBOL(dev_disable_lro);
++
++static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
++ struct net_device *dev)
++{
++ struct netdev_notifier_info info;
++
++ netdev_notifier_info_init(&info, dev);
++ return nb->notifier_call(nb, val, &info);
++}
++
++static int dev_boot_phase = 1;
++
++/**
++ * register_netdevice_notifier - register a network notifier block
++ * @nb: notifier
++ *
++ * Register a notifier to be called when network device events occur.
++ * The notifier passed is linked into the kernel structures and must
++ * not be reused until it has been unregistered. A negative errno code
++ * is returned on a failure.
++ *
++ * When registered, all registration and up events are replayed
++ * to the new notifier to allow it to have a race-free
++ * view of the network device list.
++ */
++
++int register_netdevice_notifier(struct notifier_block *nb)
++{
++ struct net_device *dev;
++ struct net_device *last;
++ struct net *net;
++ int err;
++
++ rtnl_lock();
++ err = raw_notifier_chain_register(&netdev_chain, nb);
++ if (err)
++ goto unlock;
++ if (dev_boot_phase)
++ goto unlock;
++ for_each_net(net) {
++ for_each_netdev(net, dev) {
++ err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
++ err = notifier_to_errno(err);
++ if (err)
++ goto rollback;
++
++ if (!(dev->flags & IFF_UP))
++ continue;
++
++ call_netdevice_notifier(nb, NETDEV_UP, dev);
++ }
++ }
++
++unlock:
++ rtnl_unlock();
++ return err;
++
++rollback:
++ last = dev;
++ for_each_net(net) {
++ for_each_netdev(net, dev) {
++ if (dev == last)
++ goto outroll;
++
++ if (dev->flags & IFF_UP) {
++ call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
++ dev);
++ call_netdevice_notifier(nb, NETDEV_DOWN, dev);
++ }
++ call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
++ }
++ }
++
++outroll:
++ raw_notifier_chain_unregister(&netdev_chain, nb);
++ goto unlock;
++}
++EXPORT_SYMBOL(register_netdevice_notifier);
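As a sketch of how this API is typically consumed (editorial illustration, not part of the patch; the my_* identifiers are made up): a module that wants to hear about interfaces coming up registers a notifier block whose callback inspects the event and the affected device.

	/* Hypothetical module that logs interfaces coming up. */
	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_UP)
			pr_info("%s is up\n", dev->name);
		return NOTIFY_DONE;
	}

	static struct notifier_block my_netdev_nb = {
		.notifier_call = my_netdev_event,
	};

	/* module init: register_netdevice_notifier(&my_netdev_nb);   */
	/* module exit: unregister_netdevice_notifier(&my_netdev_nb); */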
++
++/**
++ * unregister_netdevice_notifier - unregister a network notifier block
++ * @nb: notifier
++ *
++ * Unregister a notifier previously registered by
++ * register_netdevice_notifier(). The notifier is unlinked from the
++ * kernel structures and may then be reused. A negative errno code
++ * is returned on a failure.
++ *
++ * After unregistering unregister and down device events are synthesized
++ * for all devices on the device list to the removed notifier to remove
++ * the need for special case cleanup code.
++ */
++
++int unregister_netdevice_notifier(struct notifier_block *nb)
++{
++ struct net_device *dev;
++ struct net *net;
++ int err;
++
++ rtnl_lock();
++ err = raw_notifier_chain_unregister(&netdev_chain, nb);
++ if (err)
++ goto unlock;
++
++ for_each_net(net) {
++ for_each_netdev(net, dev) {
++ if (dev->flags & IFF_UP) {
++ call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
++ dev);
++ call_netdevice_notifier(nb, NETDEV_DOWN, dev);
++ }
++ call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
++ }
++ }
++unlock:
++ rtnl_unlock();
++ return err;
++}
++EXPORT_SYMBOL(unregister_netdevice_notifier);
++
++/**
++ * call_netdevice_notifiers_info - call all network notifier blocks
++ * @val: value passed unmodified to notifier function
++ * @dev: net_device pointer passed unmodified to notifier function
++ * @info: notifier information data
++ *
++ * Call all network notifier blocks. Parameters and return value
++ * are as for raw_notifier_call_chain().
++ */
++
++static int call_netdevice_notifiers_info(unsigned long val,
++ struct net_device *dev,
++ struct netdev_notifier_info *info)
++{
++ ASSERT_RTNL();
++ netdev_notifier_info_init(info, dev);
++ return raw_notifier_call_chain(&netdev_chain, val, info);
++}
++
++/**
++ * call_netdevice_notifiers - call all network notifier blocks
++ * @val: value passed unmodified to notifier function
++ * @dev: net_device pointer passed unmodified to notifier function
++ *
++ * Call all network notifier blocks. Parameters and return value
++ * are as for raw_notifier_call_chain().
++ */
++
++int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
++{
++ struct netdev_notifier_info info;
++
++ return call_netdevice_notifiers_info(val, dev, &info);
++}
++EXPORT_SYMBOL(call_netdevice_notifiers);
++
++#ifdef CONFIG_NET_CLS_ACT
++static struct static_key ingress_needed __read_mostly;
++
++void net_inc_ingress_queue(void)
++{
++ static_key_slow_inc(&ingress_needed);
++}
++EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
++
++void net_dec_ingress_queue(void)
++{
++ static_key_slow_dec(&ingress_needed);
++}
++EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
++#endif
++
++static struct static_key netstamp_needed __read_mostly;
++#ifdef HAVE_JUMP_LABEL
++/* We are not allowed to call static_key_slow_dec() from irq context
++ * If net_disable_timestamp() is called from irq context, defer the
++ * static_key_slow_dec() calls.
++ */
++static atomic_t netstamp_needed_deferred;
++#endif
++
++void net_enable_timestamp(void)
++{
++#ifdef HAVE_JUMP_LABEL
++ int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
++
++ if (deferred) {
++ while (--deferred)
++ static_key_slow_dec(&netstamp_needed);
++ return;
++ }
++#endif
++ static_key_slow_inc(&netstamp_needed);
++}
++EXPORT_SYMBOL(net_enable_timestamp);
++
++void net_disable_timestamp(void)
++{
++#ifdef HAVE_JUMP_LABEL
++ if (in_interrupt()) {
++ atomic_inc(&netstamp_needed_deferred);
++ return;
++ }
++#endif
++ static_key_slow_dec(&netstamp_needed);
++}
++EXPORT_SYMBOL(net_disable_timestamp);
++
++static inline void net_timestamp_set(struct sk_buff *skb)
++{
++ skb->tstamp.tv64 = 0;
++ if (static_key_false(&netstamp_needed))
++ __net_timestamp(skb);
++}
++
++#define net_timestamp_check(COND, SKB) \
++ if (static_key_false(&netstamp_needed)) { \
++ if ((COND) && !(SKB)->tstamp.tv64) \
++ __net_timestamp(SKB); \
++ } \
++
++bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
++{
++ unsigned int len;
++
++ if (!(dev->flags & IFF_UP))
++ return false;
++
++ len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
++ if (skb->len <= len)
++ return true;
++
++ /* if TSO is enabled, we don't care about the length as the packet
++ * could be forwarded without being segmented before
++ */
++ if (skb_is_gso(skb))
++ return true;
++
++ return false;
++}
++EXPORT_SYMBOL_GPL(is_skb_forwardable);
++
++int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
++{
++ if (skb_orphan_frags(skb, GFP_ATOMIC) ||
++ unlikely(!is_skb_forwardable(dev, skb))) {
++ atomic_long_inc(&dev->rx_dropped);
++ kfree_skb(skb);
++ return NET_RX_DROP;
++ }
++
++ skb_scrub_packet(skb, true);
++ skb->priority = 0;
++ skb->protocol = eth_type_trans(skb, dev);
++ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(__dev_forward_skb);
++
++/**
++ * dev_forward_skb - loopback an skb to another netif
++ *
++ * @dev: destination network device
++ * @skb: buffer to forward
++ *
++ * return values:
++ * NET_RX_SUCCESS (no congestion)
++ * NET_RX_DROP (packet was dropped, but freed)
++ *
++ * dev_forward_skb can be used for injecting an skb from the
++ * start_xmit function of one device into the receive queue
++ * of another device.
++ *
++ * The receiving device may be in another namespace, so
++ * we have to clear all information in the skb that could
++ * impact namespace isolation.
++ */
++int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
++{
++ return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
++}
++EXPORT_SYMBOL_GPL(dev_forward_skb);
++
++static inline int deliver_skb(struct sk_buff *skb,
++ struct packet_type *pt_prev,
++ struct net_device *orig_dev)
++{
++ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
++ return -ENOMEM;
++ atomic_inc(&skb->users);
++ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
++}
++
++static inline void deliver_ptype_list_skb(struct sk_buff *skb,
++ struct packet_type **pt,
++ struct net_device *orig_dev,
++ __be16 type,
++ struct list_head *ptype_list)
++{
++ struct packet_type *ptype, *pt_prev = *pt;
++
++ list_for_each_entry_rcu(ptype, ptype_list, list) {
++ if (ptype->type != type)
++ continue;
++ if (pt_prev)
++ deliver_skb(skb, pt_prev, orig_dev);
++ pt_prev = ptype;
++ }
++ *pt = pt_prev;
++}
++
++static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
++{
++ if (!ptype->af_packet_priv || !skb->sk)
++ return false;
++
++ if (ptype->id_match)
++ return ptype->id_match(ptype, skb->sk);
++ else if ((struct sock *)ptype->af_packet_priv == skb->sk)
++ return true;
++
++ return false;
++}
++
++/*
++ * Support routine. Sends outgoing frames to any network
++ * taps currently in use.
++ */
++
++static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct packet_type *ptype;
++ struct sk_buff *skb2 = NULL;
++ struct packet_type *pt_prev = NULL;
++ struct list_head *ptype_list = &ptype_all;
++
++ rcu_read_lock();
++again:
++ list_for_each_entry_rcu(ptype, ptype_list, list) {
++ /* Never send packets back to the socket
++ * they originated from - MvS (miquels@drinkel.ow.org)
++ */
++ if (skb_loop_sk(ptype, skb))
++ continue;
++
++ if (pt_prev) {
++ deliver_skb(skb2, pt_prev, skb->dev);
++ pt_prev = ptype;
++ continue;
++ }
++
++ /* need to clone skb, done only once */
++ skb2 = skb_clone(skb, GFP_ATOMIC);
++ if (!skb2)
++ goto out_unlock;
++
++ net_timestamp_set(skb2);
++
++ /* skb->nh should be correctly
++ * set by sender, so that the second statement is
++ * just protection against buggy protocols.
++ */
++ skb_reset_mac_header(skb2);
++
++ if (skb_network_header(skb2) < skb2->data ||
++ skb_network_header(skb2) > skb_tail_pointer(skb2)) {
++ net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
++ ntohs(skb2->protocol),
++ dev->name);
++ skb_reset_network_header(skb2);
++ }
++
++ skb2->transport_header = skb2->network_header;
++ skb2->pkt_type = PACKET_OUTGOING;
++ pt_prev = ptype;
++ }
++
++ if (ptype_list == &ptype_all) {
++ ptype_list = &dev->ptype_all;
++ goto again;
++ }
++out_unlock:
++ if (pt_prev)
++ pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
++ rcu_read_unlock();
++}
++
++/**
++ * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
++ * @dev: Network device
++ * @txq: number of queues available
++ *
++ * If real_num_tx_queues is changed the tc mappings may no longer be
++ * valid. To resolve this, verify the tc mapping remains valid and, if
++ * not, NULL the mapping. With no priorities mapping to this
++ * offset/count pair it will no longer be used. In the worst case, if TC0
++ * is invalid, nothing can be done, so priority mappings are disabled. It is
++ * expected that drivers will fix this mapping if they can before
++ * calling netif_set_real_num_tx_queues.
++ */
++static void netif_setup_tc(struct net_device *dev, unsigned int txq)
++{
++ int i;
++ struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
++
++ /* If TC0 is invalidated disable TC mapping */
++ if (tc->offset + tc->count > txq) {
++ pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
++ dev->num_tc = 0;
++ return;
++ }
++
++ /* Invalidated prio to tc mappings set to TC0 */
++ for (i = 1; i < TC_BITMASK + 1; i++) {
++ int q = netdev_get_prio_tc_map(dev, i);
++
++ tc = &dev->tc_to_txq[q];
++ if (tc->offset + tc->count > txq) {
++ pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
++ i, q);
++ netdev_set_prio_tc_map(dev, i, 0);
++ }
++ }
++}
++
++#ifdef CONFIG_XPS
++static DEFINE_MUTEX(xps_map_mutex);
++#define xmap_dereference(P) \
++ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
++
++static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
++ int cpu, u16 index)
++{
++ struct xps_map *map = NULL;
++ int pos;
++
++ if (dev_maps)
++ map = xmap_dereference(dev_maps->cpu_map[cpu]);
++
++ for (pos = 0; map && pos < map->len; pos++) {
++ if (map->queues[pos] == index) {
++ if (map->len > 1) {
++ map->queues[pos] = map->queues[--map->len];
++ } else {
++ RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
++ kfree_rcu(map, rcu);
++ map = NULL;
++ }
++ break;
++ }
++ }
++
++ return map;
++}
++
++static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
++{
++ struct xps_dev_maps *dev_maps;
++ int cpu, i;
++ bool active = false;
++
++ mutex_lock(&xps_map_mutex);
++ dev_maps = xmap_dereference(dev->xps_maps);
++
++ if (!dev_maps)
++ goto out_no_maps;
++
++ for_each_possible_cpu(cpu) {
++ for (i = index; i < dev->num_tx_queues; i++) {
++ if (!remove_xps_queue(dev_maps, cpu, i))
++ break;
++ }
++ if (i == dev->num_tx_queues)
++ active = true;
++ }
++
++ if (!active) {
++ RCU_INIT_POINTER(dev->xps_maps, NULL);
++ kfree_rcu(dev_maps, rcu);
++ }
++
++ for (i = index; i < dev->num_tx_queues; i++)
++ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
++ NUMA_NO_NODE);
++
++out_no_maps:
++ mutex_unlock(&xps_map_mutex);
++}
++
++static struct xps_map *expand_xps_map(struct xps_map *map,
++ int cpu, u16 index)
++{
++ struct xps_map *new_map;
++ int alloc_len = XPS_MIN_MAP_ALLOC;
++ int i, pos;
++
++ for (pos = 0; map && pos < map->len; pos++) {
++ if (map->queues[pos] != index)
++ continue;
++ return map;
++ }
++
++ /* Need to add queue to this CPU's existing map */
++ if (map) {
++ if (pos < map->alloc_len)
++ return map;
++
++ alloc_len = map->alloc_len * 2;
++ }
++
++ /* Need to allocate new map to store queue on this CPU's map */
++ new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
++ cpu_to_node(cpu));
++ if (!new_map)
++ return NULL;
++
++ for (i = 0; i < pos; i++)
++ new_map->queues[i] = map->queues[i];
++ new_map->alloc_len = alloc_len;
++ new_map->len = pos;
++
++ return new_map;
++}
++
++int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
++ u16 index)
++{
++ struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
++ struct xps_map *map, *new_map;
++ int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
++ int cpu, numa_node_id = -2;
++ bool active = false;
++
++ mutex_lock(&xps_map_mutex);
++
++ dev_maps = xmap_dereference(dev->xps_maps);
++
++ /* allocate memory for queue storage */
++ for_each_online_cpu(cpu) {
++ if (!cpumask_test_cpu(cpu, mask))
++ continue;
++
++ if (!new_dev_maps)
++ new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
++ if (!new_dev_maps) {
++ mutex_unlock(&xps_map_mutex);
++ return -ENOMEM;
++ }
++
++ map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
++ NULL;
++
++ map = expand_xps_map(map, cpu, index);
++ if (!map)
++ goto error;
++
++ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
++ }
++
++ if (!new_dev_maps)
++ goto out_no_new_maps;
++
++ for_each_possible_cpu(cpu) {
++ if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
++ /* add queue to CPU maps */
++ int pos = 0;
++
++ map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
++ while ((pos < map->len) && (map->queues[pos] != index))
++ pos++;
++
++ if (pos == map->len)
++ map->queues[map->len++] = index;
++#ifdef CONFIG_NUMA
++ if (numa_node_id == -2)
++ numa_node_id = cpu_to_node(cpu);
++ else if (numa_node_id != cpu_to_node(cpu))
++ numa_node_id = -1;
++#endif
++ } else if (dev_maps) {
++ /* fill in the new device map from the old device map */
++ map = xmap_dereference(dev_maps->cpu_map[cpu]);
++ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
++ }
++
++ }
++
++ rcu_assign_pointer(dev->xps_maps, new_dev_maps);
++
++ /* Cleanup old maps */
++ if (dev_maps) {
++ for_each_possible_cpu(cpu) {
++ new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
++ map = xmap_dereference(dev_maps->cpu_map[cpu]);
++ if (map && map != new_map)
++ kfree_rcu(map, rcu);
++ }
++
++ kfree_rcu(dev_maps, rcu);
++ }
++
++ dev_maps = new_dev_maps;
++ active = true;
++
++out_no_new_maps:
++ /* update Tx queue numa node */
++ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
++ (numa_node_id >= 0) ? numa_node_id :
++ NUMA_NO_NODE);
++
++ if (!dev_maps)
++ goto out_no_maps;
++
++ /* removes queue from unused CPUs */
++ for_each_possible_cpu(cpu) {
++ if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
++ continue;
++
++ if (remove_xps_queue(dev_maps, cpu, index))
++ active = true;
++ }
++
++ /* free map if not active */
++ if (!active) {
++ RCU_INIT_POINTER(dev->xps_maps, NULL);
++ kfree_rcu(dev_maps, rcu);
++ }
++
++out_no_maps:
++ mutex_unlock(&xps_map_mutex);
++
++ return 0;
++error:
++ /* remove any maps that we added */
++ for_each_possible_cpu(cpu) {
++ new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
++ map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
++ NULL;
++ if (new_map && new_map != map)
++ kfree(new_map);
++ }
++
++ mutex_unlock(&xps_map_mutex);
++
++ kfree(new_dev_maps);
++ return -ENOMEM;
++}
++EXPORT_SYMBOL(netif_set_xps_queue);
++
++#endif
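Illustrative use only (editorial, not from the patch; assumes CONFIG_XPS and a driver-chosen queue/CPU pairing): a driver can steer a transmit queue to a CPU set roughly as follows.

	cpumask_var_t mask;

	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		cpumask_set_cpu(0, mask);	/* steer TX queue 0 to CPU 0 */
		netif_set_xps_queue(dev, mask, 0);
		free_cpumask_var(mask);
	}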
++/*
++ * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
++ * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
++ */
++int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
++{
++ int rc;
++
++ if (txq < 1 || txq > dev->num_tx_queues)
++ return -EINVAL;
++
++ if (dev->reg_state == NETREG_REGISTERED ||
++ dev->reg_state == NETREG_UNREGISTERING) {
++ ASSERT_RTNL();
++
++ rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
++ txq);
++ if (rc)
++ return rc;
++
++ if (dev->num_tc)
++ netif_setup_tc(dev, txq);
++
++ if (txq < dev->real_num_tx_queues) {
++ qdisc_reset_all_tx_gt(dev, txq);
++#ifdef CONFIG_XPS
++ netif_reset_xps_queues_gt(dev, txq);
++#endif
++ }
++ }
++
++ dev->real_num_tx_queues = txq;
++ return 0;
++}
++EXPORT_SYMBOL(netif_set_real_num_tx_queues);
++
++#ifdef CONFIG_SYSFS
++/**
++ * netif_set_real_num_rx_queues - set actual number of RX queues used
++ * @dev: Network device
++ * @rxq: Actual number of RX queues
++ *
++ * This must be called either with the rtnl_lock held or before
++ * registration of the net device. Returns 0 on success, or a
++ * negative error code. If called before registration, it always
++ * succeeds.
++ */
++int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
++{
++ int rc;
++
++ if (rxq < 1 || rxq > dev->num_rx_queues)
++ return -EINVAL;
++
++ if (dev->reg_state == NETREG_REGISTERED) {
++ ASSERT_RTNL();
++
++ rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
++ rxq);
++ if (rc)
++ return rc;
++ }
++
++ dev->real_num_rx_queues = rxq;
++ return 0;
++}
++EXPORT_SYMBOL(netif_set_real_num_rx_queues);
++#endif
++
++/**
++ * netif_get_num_default_rss_queues - default number of RSS queues
++ *
++ * This routine should set an upper limit on the number of RSS queues
++ * used by default by multiqueue devices.
++ */
++int netif_get_num_default_rss_queues(void)
++{
++ return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
++}
++EXPORT_SYMBOL(netif_get_num_default_rss_queues);
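A common driver-side use of the helper above, sketched for illustration and not part of the patch (hw_max_rx_queues and num_rxq are hypothetical driver variables): cap the advertised RX queue count at the default RSS limit.

	num_rxq = min_t(unsigned int, hw_max_rx_queues,
			netif_get_num_default_rss_queues());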
++
++static inline void __netif_reschedule(struct Qdisc *q)
++{
++ struct softnet_data *sd;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ sd = this_cpu_ptr(&softnet_data);
++ q->next_sched = NULL;
++ *sd->output_queue_tailp = q;
++ sd->output_queue_tailp = &q->next_sched;
++ raise_softirq_irqoff(NET_TX_SOFTIRQ);
++ local_irq_restore(flags);
++}
++
++void __netif_schedule(struct Qdisc *q)
++{
++ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
++ __netif_reschedule(q);
++}
++EXPORT_SYMBOL(__netif_schedule);
++
++struct dev_kfree_skb_cb {
++ enum skb_free_reason reason;
++};
++
++static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
++{
++ return (struct dev_kfree_skb_cb *)skb->cb;
++}
++
++void netif_schedule_queue(struct netdev_queue *txq)
++{
++ rcu_read_lock();
++ if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
++ struct Qdisc *q = rcu_dereference(txq->qdisc);
++
++ __netif_schedule(q);
++ }
++ rcu_read_unlock();
++}
++EXPORT_SYMBOL(netif_schedule_queue);
++
++/**
++ * netif_wake_subqueue - allow sending packets on subqueue
++ * @dev: network device
++ * @queue_index: sub queue index
++ *
++ * Resume individual transmit queue of a device with multiple transmit queues.
++ */
++void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
++{
++ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
++
++ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
++ struct Qdisc *q;
++
++ rcu_read_lock();
++ q = rcu_dereference(txq->qdisc);
++ __netif_schedule(q);
++ rcu_read_unlock();
++ }
++}
++EXPORT_SYMBOL(netif_wake_subqueue);
++
++void netif_tx_wake_queue(struct netdev_queue *dev_queue)
++{
++ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
++ struct Qdisc *q;
++
++ rcu_read_lock();
++ q = rcu_dereference(dev_queue->qdisc);
++ __netif_schedule(q);
++ rcu_read_unlock();
++ }
++}
++EXPORT_SYMBOL(netif_tx_wake_queue);
++
++void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
++{
++ unsigned long flags;
++
++ if (likely(atomic_read(&skb->users) == 1)) {
++ smp_rmb();
++ atomic_set(&skb->users, 0);
++ } else if (likely(!atomic_dec_and_test(&skb->users))) {
++ return;
++ }
++ get_kfree_skb_cb(skb)->reason = reason;
++ local_irq_save(flags);
++ skb->next = __this_cpu_read(softnet_data.completion_queue);
++ __this_cpu_write(softnet_data.completion_queue, skb);
++ raise_softirq_irqoff(NET_TX_SOFTIRQ);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(__dev_kfree_skb_irq);
++
++void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
++{
++ if (in_irq() || irqs_disabled())
++ __dev_kfree_skb_irq(skb, reason);
++ else
++ dev_kfree_skb(skb);
++}
++EXPORT_SYMBOL(__dev_kfree_skb_any);
++
++
++/**
++ * netif_device_detach - mark device as removed
++ * @dev: network device
++ *
++ * Mark device as removed from system and therefore no longer available.
++ */
++void netif_device_detach(struct net_device *dev)
++{
++ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
++ netif_running(dev)) {
++ netif_tx_stop_all_queues(dev);
++ }
++}
++EXPORT_SYMBOL(netif_device_detach);
++
++/**
++ * netif_device_attach - mark device as attached
++ * @dev: network device
++ *
++ * Mark device as attached to the system and restart if needed.
++ */
++void netif_device_attach(struct net_device *dev)
++{
++ if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
++ netif_running(dev)) {
++ netif_tx_wake_all_queues(dev);
++ __netdev_watchdog_up(dev);
++ }
++}
++EXPORT_SYMBOL(netif_device_attach);
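Sketch (editorial, not part of the patch) of the usual power-management pairing for the two helpers above, assuming hypothetical suspend/resume callbacks in a driver:

	/* suspend path: */
	netif_device_detach(netdev);
	/* ... quiesce and power down the hardware ... */

	/* resume path: */
	/* ... re-initialise the hardware ... */
	netif_device_attach(netdev);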
++
++static void skb_warn_bad_offload(const struct sk_buff *skb)
++{
++ static const netdev_features_t null_features = 0;
++ struct net_device *dev = skb->dev;
++ const char *driver = "";
++
++ if (!net_ratelimit())
++ return;
++
++ if (dev && dev->dev.parent)
++ driver = dev_driver_string(dev->dev.parent);
++
++ WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
++ "gso_type=%d ip_summed=%d\n",
++ driver, dev ? &dev->features : &null_features,
++ skb->sk ? &skb->sk->sk_route_caps : &null_features,
++ skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
++ skb_shinfo(skb)->gso_type, skb->ip_summed);
++}
++
++/*
++ * Invalidate hardware checksum when packet is to be mangled, and
++ * complete checksum manually on outgoing path.
++ */
++int skb_checksum_help(struct sk_buff *skb)
++{
++ __wsum csum;
++ int ret = 0, offset;
++
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ goto out_set_summed;
++
++ if (unlikely(skb_shinfo(skb)->gso_size)) {
++ skb_warn_bad_offload(skb);
++ return -EINVAL;
++ }
++
++ /* Before computing a checksum, we should make sure no frag could
++ * be modified by an external entity : checksum could be wrong.
++ */
++ if (skb_has_shared_frag(skb)) {
++ ret = __skb_linearize(skb);
++ if (ret)
++ goto out;
++ }
++
++ offset = skb_checksum_start_offset(skb);
++ BUG_ON(offset >= skb_headlen(skb));
++ csum = skb_checksum(skb, offset, skb->len - offset, 0);
++
++ offset += skb->csum_offset;
++ BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
++
++ if (skb_cloned(skb) &&
++ !skb_clone_writable(skb, offset + sizeof(__sum16))) {
++ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
++ if (ret)
++ goto out;
++ }
++
++ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
++out_set_summed:
++ skb->ip_summed = CHECKSUM_NONE;
++out:
++ return ret;
++}
++EXPORT_SYMBOL(skb_checksum_help);
++
++__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
++{
++ __be16 type = skb->protocol;
++
++ /* Tunnel gso handlers can set protocol to ethernet. */
++ if (type == htons(ETH_P_TEB)) {
++ struct ethhdr *eth;
++
++ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
++ return 0;
++
++ eth = (struct ethhdr *)skb_mac_header(skb);
++ type = eth->h_proto;
++ }
++
++ return __vlan_get_protocol(skb, type, depth);
++}
++
++/**
++ * skb_mac_gso_segment - mac layer segmentation handler.
++ * @skb: buffer to segment
++ * @features: features for the output path (see dev->features)
++ */
++struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
++ netdev_features_t features)
++{
++ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
++ struct packet_offload *ptype;
++ int vlan_depth = skb->mac_len;
++ __be16 type = skb_network_protocol(skb, &vlan_depth);
++
++ if (unlikely(!type))
++ return ERR_PTR(-EINVAL);
++
++ __skb_pull(skb, vlan_depth);
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(ptype, &offload_base, list) {
++ if (ptype->type == type && ptype->callbacks.gso_segment) {
++ segs = ptype->callbacks.gso_segment(skb, features);
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ __skb_push(skb, skb->data - skb_mac_header(skb));
++
++ return segs;
++}
++EXPORT_SYMBOL(skb_mac_gso_segment);
++
++
++/* openvswitch calls this on rx path, so we need a different check.
++ */
++static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
++{
++ if (tx_path)
++ return skb->ip_summed != CHECKSUM_PARTIAL;
++ else
++ return skb->ip_summed == CHECKSUM_NONE;
++}
++
++/**
++ * __skb_gso_segment - Perform segmentation on skb.
++ * @skb: buffer to segment
++ * @features: features for the output path (see dev->features)
++ * @tx_path: whether it is called in TX path
++ *
++ * This function segments the given skb and returns a list of segments.
++ *
++ * It may return NULL if the skb requires no segmentation. This is
++ * only possible when GSO is used for verifying header integrity.
++ */
++struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
++ netdev_features_t features, bool tx_path)
++{
++ if (unlikely(skb_needs_check(skb, tx_path))) {
++ int err;
++
++ skb_warn_bad_offload(skb);
++
++ err = skb_cow_head(skb, 0);
++ if (err < 0)
++ return ERR_PTR(err);
++ }
++
++ SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
++ SKB_GSO_CB(skb)->encap_level = 0;
++
++ skb_reset_mac_header(skb);
++ skb_reset_mac_len(skb);
++
++ return skb_mac_gso_segment(skb, features);
++}
++EXPORT_SYMBOL(__skb_gso_segment);
++
++/* Take action when hardware reception checksum errors are detected. */
++#ifdef CONFIG_BUG
++void netdev_rx_csum_fault(struct net_device *dev)
++{
++ if (net_ratelimit()) {
++ pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
++ dump_stack();
++ }
++}
++EXPORT_SYMBOL(netdev_rx_csum_fault);
++#endif
++
++/* Actually, we should eliminate this check as soon as we know that:
++ * 1. An IOMMU is present and can map all the memory.
++ * 2. No high memory really exists on this machine.
++ */
++
++static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
++{
++#ifdef CONFIG_HIGHMEM
++ int i;
++ if (!(dev->features & NETIF_F_HIGHDMA)) {
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++ if (PageHighMem(skb_frag_page(frag)))
++ return 1;
++ }
++ }
++
++ if (PCI_DMA_BUS_IS_PHYS) {
++ struct device *pdev = dev->dev.parent;
++
++ if (!pdev)
++ return 0;
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++ dma_addr_t addr = page_to_phys(skb_frag_page(frag));
++ if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
++ return 1;
++ }
++ }
++#endif
++ return 0;
++}
++
++/* If MPLS offload request, verify we are testing hardware MPLS features
++ * instead of standard features for the netdev.
++ */
++#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
++static netdev_features_t net_mpls_features(struct sk_buff *skb,
++ netdev_features_t features,
++ __be16 type)
++{
++ if (eth_p_mpls(type))
++ features &= skb->dev->mpls_features;
++
++ return features;
++}
++#else
++static netdev_features_t net_mpls_features(struct sk_buff *skb,
++ netdev_features_t features,
++ __be16 type)
++{
++ return features;
++}
++#endif
++
++static netdev_features_t harmonize_features(struct sk_buff *skb,
++ netdev_features_t features)
++{
++ int tmp;
++ __be16 type;
++
++ type = skb_network_protocol(skb, &tmp);
++ features = net_mpls_features(skb, features, type);
++
++ if (skb->ip_summed != CHECKSUM_NONE &&
++ !can_checksum_protocol(features, type)) {
++ features &= ~NETIF_F_ALL_CSUM;
++ } else if (illegal_highdma(skb->dev, skb)) {
++ features &= ~NETIF_F_SG;
++ }
++
++ return features;
++}
++
++netdev_features_t passthru_features_check(struct sk_buff *skb,
++ struct net_device *dev,
++ netdev_features_t features)
++{
++ return features;
++}
++EXPORT_SYMBOL(passthru_features_check);
++
++static netdev_features_t dflt_features_check(const struct sk_buff *skb,
++ struct net_device *dev,
++ netdev_features_t features)
++{
++ return vlan_features_check(skb, features);
++}
++
++netdev_features_t netif_skb_features(struct sk_buff *skb)
++{
++ struct net_device *dev = skb->dev;
++ netdev_features_t features = dev->features;
++ u16 gso_segs = skb_shinfo(skb)->gso_segs;
++
++ if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
++ features &= ~NETIF_F_GSO_MASK;
++
++ /* If encapsulation offload request, verify we are testing
++ * hardware encapsulation features instead of standard
++ * features for the netdev
++ */
++ if (skb->encapsulation)
++ features &= dev->hw_enc_features;
++
++ if (skb_vlan_tagged(skb))
++ features = netdev_intersect_features(features,
++ dev->vlan_features |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX);
++
++ if (dev->netdev_ops->ndo_features_check)
++ features &= dev->netdev_ops->ndo_features_check(skb, dev,
++ features);
++ else
++ features &= dflt_features_check(skb, dev, features);
++
++ return harmonize_features(skb, features);
++}
++EXPORT_SYMBOL(netif_skb_features);
++
++static int xmit_one(struct sk_buff *skb, struct net_device *dev,
++ struct netdev_queue *txq, bool more)
++{
++ unsigned int len;
++ int rc;
++
++ if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
++ dev_queue_xmit_nit(skb, dev);
++
++ len = skb->len;
++ trace_net_dev_start_xmit(skb, dev);
++ rc = netdev_start_xmit(skb, dev, txq, more);
++ trace_net_dev_xmit(skb, rc, dev, len);
++
++ return rc;
++}
++
++struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
++ struct netdev_queue *txq, int *ret)
++{
++ struct sk_buff *skb = first;
++ int rc = NETDEV_TX_OK;
++
++ while (skb) {
++ struct sk_buff *next = skb->next;
++
++ skb->next = NULL;
++ rc = xmit_one(skb, dev, txq, next != NULL);
++ if (unlikely(!dev_xmit_complete(rc))) {
++ skb->next = next;
++ goto out;
++ }
++
++ skb = next;
++ if (netif_xmit_stopped(txq) && skb) {
++ rc = NETDEV_TX_BUSY;
++ break;
++ }
++ }
++
++out:
++ *ret = rc;
++ return skb;
++}
++
++static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
++ netdev_features_t features)
++{
++ if (skb_vlan_tag_present(skb) &&
++ !vlan_hw_offload_capable(features, skb->vlan_proto))
++ skb = __vlan_hwaccel_push_inside(skb);
++ return skb;
++}
++
++static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
++{
++ netdev_features_t features;
++
++ if (skb->next)
++ return skb;
++
++ features = netif_skb_features(skb);
++ skb = validate_xmit_vlan(skb, features);
++ if (unlikely(!skb))
++ goto out_null;
++
++ if (netif_needs_gso(skb, features)) {
++ struct sk_buff *segs;
++
++ segs = skb_gso_segment(skb, features);
++ if (IS_ERR(segs)) {
++ goto out_kfree_skb;
++ } else if (segs) {
++ consume_skb(skb);
++ skb = segs;
++ }
++ } else {
++ if (skb_needs_linearize(skb, features) &&
++ __skb_linearize(skb))
++ goto out_kfree_skb;
++
++ /* If packet is not checksummed and device does not
++ * support checksumming for this protocol, complete
++ * checksumming here.
++ */
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ if (skb->encapsulation)
++ skb_set_inner_transport_header(skb,
++ skb_checksum_start_offset(skb));
++ else
++ skb_set_transport_header(skb,
++ skb_checksum_start_offset(skb));
++ if (!(features & NETIF_F_ALL_CSUM) &&
++ skb_checksum_help(skb))
++ goto out_kfree_skb;
++ }
++ }
++
++ return skb;
++
++out_kfree_skb:
++ kfree_skb(skb);
++out_null:
++ return NULL;
++}
++
++struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
++{
++ struct sk_buff *next, *head = NULL, *tail;
++
++ for (; skb != NULL; skb = next) {
++ next = skb->next;
++ skb->next = NULL;
++
++ /* in case skb won't be segmented, point to itself */
++ skb->prev = skb;
++
++ skb = validate_xmit_skb(skb, dev);
++ if (!skb)
++ continue;
++
++ if (!head)
++ head = skb;
++ else
++ tail->next = skb;
++ /* If skb was segmented, skb->prev points to
++ * the last segment. If not, it still contains skb.
++ */
++ tail = skb->prev;
++ }
++ return head;
++}
++
++static void qdisc_pkt_len_init(struct sk_buff *skb)
++{
++ const struct skb_shared_info *shinfo = skb_shinfo(skb);
++
++ qdisc_skb_cb(skb)->pkt_len = skb->len;
++
++ /* To get more precise estimation of bytes sent on wire,
++ * we add to pkt_len the headers size of all segments
++ */
++ if (shinfo->gso_size) {
++ unsigned int hdr_len;
++ u16 gso_segs = shinfo->gso_segs;
++
++ /* mac layer + network layer */
++ hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
++
++ /* + transport layer */
++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
++ hdr_len += tcp_hdrlen(skb);
++ else
++ hdr_len += sizeof(struct udphdr);
++
++ if (shinfo->gso_type & SKB_GSO_DODGY)
++ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
++ shinfo->gso_size);
++
++ qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
++ }
++}
++
++static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
++ struct net_device *dev,
++ struct netdev_queue *txq)
++{
++ spinlock_t *root_lock = qdisc_lock(q);
++ bool contended;
++ int rc;
++
++ qdisc_pkt_len_init(skb);
++ qdisc_calculate_pkt_len(skb, q);
++ /*
++ * Heuristic to force contended enqueues to serialize on a
++ * separate lock before trying to get qdisc main lock.
++ * This permits __QDISC___STATE_RUNNING owner to get the lock more
++ * often and dequeue packets faster.
++ */
++ contended = qdisc_is_running(q);
++ if (unlikely(contended))
++ spin_lock(&q->busylock);
++
++ spin_lock(root_lock);
++ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
++ kfree_skb(skb);
++ rc = NET_XMIT_DROP;
++ } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
++ qdisc_run_begin(q)) {
++ /*
++ * This is a work-conserving queue; there are no old skbs
++ * waiting to be sent out; and the qdisc is not running -
++ * xmit the skb directly.
++ */
++
++ qdisc_bstats_update(q, skb);
++
++ if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
++ if (unlikely(contended)) {
++ spin_unlock(&q->busylock);
++ contended = false;
++ }
++ __qdisc_run(q);
++ } else
++ qdisc_run_end(q);
++
++ rc = NET_XMIT_SUCCESS;
++ } else {
++ rc = q->enqueue(skb, q) & NET_XMIT_MASK;
++ if (qdisc_run_begin(q)) {
++ if (unlikely(contended)) {
++ spin_unlock(&q->busylock);
++ contended = false;
++ }
++ __qdisc_run(q);
++ }
++ }
++ spin_unlock(root_lock);
++ if (unlikely(contended))
++ spin_unlock(&q->busylock);
++ return rc;
++}
++
++#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
++static void skb_update_prio(struct sk_buff *skb)
++{
++ struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
++
++ if (!skb->priority && skb->sk && map) {
++ unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
++
++ if (prioidx < map->priomap_len)
++ skb->priority = map->priomap[prioidx];
++ }
++}
++#else
++#define skb_update_prio(skb)
++#endif
++
++DEFINE_PER_CPU(int, xmit_recursion);
++EXPORT_SYMBOL(xmit_recursion);
++
++#define RECURSION_LIMIT 10
++
++/**
++ * dev_loopback_xmit - loop back @skb
++ * @skb: buffer to transmit
++ */
++int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
++{
++ skb_reset_mac_header(skb);
++ __skb_pull(skb, skb_network_offset(skb));
++ skb->pkt_type = PACKET_LOOPBACK;
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ WARN_ON(!skb_dst(skb));
++ skb_dst_force(skb);
++ netif_rx_ni(skb);
++ return 0;
++}
++EXPORT_SYMBOL(dev_loopback_xmit);
++
++/**
++ * __dev_queue_xmit - transmit a buffer
++ * @skb: buffer to transmit
++ * @accel_priv: private data used for L2 forwarding offload
++ *
++ * Queue a buffer for transmission to a network device. The caller must
++ * have set the device and priority and built the buffer before calling
++ * this function. The function can be called from an interrupt.
++ *
++ * A negative errno code is returned on a failure. A success does not
++ * guarantee the frame will be transmitted as it may be dropped due
++ * to congestion or traffic shaping.
++ *
++ * -----------------------------------------------------------------------------------
++ * I notice this method can also return errors from the queue disciplines,
++ * including NET_XMIT_DROP, which is a positive value. So, errors can also
++ * be positive.
++ *
++ * Regardless of the return value, the skb is consumed, so it is currently
++ * difficult to retry a send to this method. (You can bump the ref count
++ * before sending to hold a reference for retry if you are careful.)
++ *
++ * When calling this method, interrupts MUST be enabled. This is because
++ * the BH enable code must have IRQs enabled so that it will not deadlock.
++ * --BLG
++ */
++static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
++{
++ struct net_device *dev = skb->dev;
++ struct netdev_queue *txq;
++ struct Qdisc *q;
++ int rc = -ENOMEM;
++
++ skb_reset_mac_header(skb);
++
++ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
++ __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
++
++ /* Disable soft irqs for various locks below. Also
++ * stops preemption for RCU.
++ */
++ rcu_read_lock_bh();
++
++ skb_update_prio(skb);
++
++ /* If device/qdisc don't need skb->dst, release it right now while
++ * its hot in this cpu cache.
++ */
++ if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
++ skb_dst_drop(skb);
++ else
++ skb_dst_force(skb);
++
++ txq = netdev_pick_tx(dev, skb, accel_priv);
++ q = rcu_dereference_bh(txq->qdisc);
++
++#ifdef CONFIG_NET_CLS_ACT
++ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
++#endif
++ trace_net_dev_queue(skb);
++ if (q->enqueue) {
++ rc = __dev_xmit_skb(skb, q, dev, txq);
++ goto out;
++ }
++
++ /* The device has no queue. Common case for software devices:
++ loopback, all the sorts of tunnels...
++
++ Really, it is unlikely that netif_tx_lock protection is necessary
++ here. (f.e. loopback and IP tunnels are clean ignoring statistics
++ counters.)
++ However, it is possible, that they rely on protection
++ made by us here.
++
++ Check this and shot the lock. It is not prone from deadlocks.
++ Either shot noqueue qdisc, it is even simpler 8)
++ */
++ if (dev->flags & IFF_UP) {
++ int cpu = smp_processor_id(); /* ok because BHs are off */
++
++ if (txq->xmit_lock_owner != cpu) {
++
++ if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
++ goto recursion_alert;
++
++ skb = validate_xmit_skb(skb, dev);
++ if (!skb)
++ goto drop;
++
++ HARD_TX_LOCK(dev, txq, cpu);
++
++ if (!netif_xmit_stopped(txq)) {
++ __this_cpu_inc(xmit_recursion);
++ skb = dev_hard_start_xmit(skb, dev, txq, &rc);
++ __this_cpu_dec(xmit_recursion);
++ if (dev_xmit_complete(rc)) {
++ HARD_TX_UNLOCK(dev, txq);
++ goto out;
++ }
++ }
++ HARD_TX_UNLOCK(dev, txq);
++ net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
++ dev->name);
++ } else {
++ /* Recursion is detected! It is possible,
++ * unfortunately
++ */
++recursion_alert:
++ net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
++ dev->name);
++ }
++ }
++
++ rc = -ENETDOWN;
++drop:
++ rcu_read_unlock_bh();
++
++ atomic_long_inc(&dev->tx_dropped);
++ kfree_skb_list(skb);
++ return rc;
++out:
++ rcu_read_unlock_bh();
++ return rc;
++}
++
++int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
++{
++ return __dev_queue_xmit(skb, NULL);
++}
++EXPORT_SYMBOL(dev_queue_xmit_sk);
++
++int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
++{
++ return __dev_queue_xmit(skb, accel_priv);
++}
++EXPORT_SYMBOL(dev_queue_xmit_accel);
++
++
++/*=======================================================================
++ Receiver routines
++ =======================================================================*/
++
++int netdev_max_backlog __read_mostly = 1000;
++EXPORT_SYMBOL(netdev_max_backlog);
++
++int netdev_tstamp_prequeue __read_mostly = 1;
++int netdev_budget __read_mostly = 300;
++int weight_p __read_mostly = 64; /* old backlog weight */
++
++/* Called with irq disabled */
++static inline void ____napi_schedule(struct softnet_data *sd,
++ struct napi_struct *napi)
++{
++ list_add_tail(&napi->poll_list, &sd->poll_list);
++ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
++}
++
++#ifdef CONFIG_RPS
++
++/* One global table that all flow-based protocols share. */
++struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
++EXPORT_SYMBOL(rps_sock_flow_table);
++u32 rps_cpu_mask __read_mostly;
++EXPORT_SYMBOL(rps_cpu_mask);
++
++struct static_key rps_needed __read_mostly;
++
++static struct rps_dev_flow *
++set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
++ struct rps_dev_flow *rflow, u16 next_cpu)
++{
++ if (next_cpu < nr_cpu_ids) {
++#ifdef CONFIG_RFS_ACCEL
++ struct netdev_rx_queue *rxqueue;
++ struct rps_dev_flow_table *flow_table;
++ struct rps_dev_flow *old_rflow;
++ u32 flow_id;
++ u16 rxq_index;
++ int rc;
++
++ /* Should we steer this flow to a different hardware queue? */
++ if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
++ !(dev->features & NETIF_F_NTUPLE))
++ goto out;
++ rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
++ if (rxq_index == skb_get_rx_queue(skb))
++ goto out;
++
++ rxqueue = dev->_rx + rxq_index;
++ flow_table = rcu_dereference(rxqueue->rps_flow_table);
++ if (!flow_table)
++ goto out;
++ flow_id = skb_get_hash(skb) & flow_table->mask;
++ rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
++ rxq_index, flow_id);
++ if (rc < 0)
++ goto out;
++ old_rflow = rflow;
++ rflow = &flow_table->flows[flow_id];
++ rflow->filter = rc;
++ if (old_rflow->filter == rflow->filter)
++ old_rflow->filter = RPS_NO_FILTER;
++ out:
++#endif
++ rflow->last_qtail =
++ per_cpu(softnet_data, next_cpu).input_queue_head;
++ }
++
++ rflow->cpu = next_cpu;
++ return rflow;
++}
++
++/*
++ * get_rps_cpu is called from netif_receive_skb and returns the target
++ * CPU from the RPS map of the receiving queue for a given skb.
++ * rcu_read_lock must be held on entry.
++ */
++static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
++ struct rps_dev_flow **rflowp)
++{
++ const struct rps_sock_flow_table *sock_flow_table;
++ struct netdev_rx_queue *rxqueue = dev->_rx;
++ struct rps_dev_flow_table *flow_table;
++ struct rps_map *map;
++ int cpu = -1;
++ u32 tcpu;
++ u32 hash;
++
++ if (skb_rx_queue_recorded(skb)) {
++ u16 index = skb_get_rx_queue(skb);
++
++ if (unlikely(index >= dev->real_num_rx_queues)) {
++ WARN_ONCE(dev->real_num_rx_queues > 1,
++ "%s received packet on queue %u, but number "
++ "of RX queues is %u\n",
++ dev->name, index, dev->real_num_rx_queues);
++ goto done;
++ }
++ rxqueue += index;
++ }
++
++ /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
++
++ flow_table = rcu_dereference(rxqueue->rps_flow_table);
++ map = rcu_dereference(rxqueue->rps_map);
++ if (!flow_table && !map)
++ goto done;
++
++ skb_reset_network_header(skb);
++ hash = skb_get_hash(skb);
++ if (!hash)
++ goto done;
++
++ sock_flow_table = rcu_dereference(rps_sock_flow_table);
++ if (flow_table && sock_flow_table) {
++ struct rps_dev_flow *rflow;
++ u32 next_cpu;
++ u32 ident;
++
++ /* First check into global flow table if there is a match */
++ ident = sock_flow_table->ents[hash & sock_flow_table->mask];
++ if ((ident ^ hash) & ~rps_cpu_mask)
++ goto try_rps;
++
++ next_cpu = ident & rps_cpu_mask;
++
++ /* OK, now we know there is a match,
++ * we can look at the local (per receive queue) flow table
++ */
++ rflow = &flow_table->flows[hash & flow_table->mask];
++ tcpu = rflow->cpu;
++
++ /*
++ * If the desired CPU (where last recvmsg was done) is
++ * different from current CPU (one in the rx-queue flow
++ * table entry), switch if one of the following holds:
++ * - Current CPU is unset (>= nr_cpu_ids).
++ * - Current CPU is offline.
++ * - The current CPU's queue tail has advanced beyond the
++ * last packet that was enqueued using this table entry.
++ * This guarantees that all previous packets for the flow
++ * have been dequeued, thus preserving in order delivery.
++ */
++ if (unlikely(tcpu != next_cpu) &&
++ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
++ ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
++ rflow->last_qtail)) >= 0)) {
++ tcpu = next_cpu;
++ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
++ }
++
++ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
++ *rflowp = rflow;
++ cpu = tcpu;
++ goto done;
++ }
++ }
++
++try_rps:
++
++ if (map) {
++ tcpu = map->cpus[reciprocal_scale(hash, map->len)];
++ if (cpu_online(tcpu)) {
++ cpu = tcpu;
++ goto done;
++ }
++ }
++
++done:
++ return cpu;
++}
++
++#ifdef CONFIG_RFS_ACCEL
++
++/**
++ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
++ * @dev: Device on which the filter was set
++ * @rxq_index: RX queue index
++ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
++ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
++ *
++ * Drivers that implement ndo_rx_flow_steer() should periodically call
++ * this function for each installed filter and remove the filters for
++ * which it returns %true.
++ */
++bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
++ u32 flow_id, u16 filter_id)
++{
++ struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
++ struct rps_dev_flow_table *flow_table;
++ struct rps_dev_flow *rflow;
++ bool expire = true;
++ unsigned int cpu;
++
++ rcu_read_lock();
++ flow_table = rcu_dereference(rxqueue->rps_flow_table);
++ if (flow_table && flow_id <= flow_table->mask) {
++ rflow = &flow_table->flows[flow_id];
++ cpu = ACCESS_ONCE(rflow->cpu);
++ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
++ ((int)(per_cpu(softnet_data, cpu).input_queue_head -
++ rflow->last_qtail) <
++ (int)(10 * flow_table->mask)))
++ expire = false;
++ }
++ rcu_read_unlock();
++ return expire;
++}
++EXPORT_SYMBOL(rps_may_expire_flow);
++
++#endif /* CONFIG_RFS_ACCEL */
++
++/* Called from hardirq (IPI) context */
++static void rps_trigger_softirq(void *data)
++{
++ struct softnet_data *sd = data;
++
++ ____napi_schedule(sd, &sd->backlog);
++ sd->received_rps++;
++}
++
++#endif /* CONFIG_RPS */
++
++/*
++ * Check if this softnet_data structure belongs to another CPU.
++ * If so, queue it on our IPI list and return 1;
++ * otherwise return 0.
++ */
++static int rps_ipi_queued(struct softnet_data *sd)
++{
++#ifdef CONFIG_RPS
++ struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
++
++ if (sd != mysd) {
++ sd->rps_ipi_next = mysd->rps_ipi_list;
++ mysd->rps_ipi_list = sd;
++
++ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ return 1;
++ }
++#endif /* CONFIG_RPS */
++ return 0;
++}
++
++#ifdef CONFIG_NET_FLOW_LIMIT
++int netdev_flow_limit_table_len __read_mostly = (1 << 12);
++#endif
++
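++/* Per-flow backlog accounting (CONFIG_NET_FLOW_LIMIT): once the input
++ * queue is more than half full, track recent flow hashes in a small
++ * history ring and report true for flows that dominate it, so their
++ * packets can be dropped instead of starving other flows.
++ */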
++static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
++{
++#ifdef CONFIG_NET_FLOW_LIMIT
++ struct sd_flow_limit *fl;
++ struct softnet_data *sd;
++ unsigned int old_flow, new_flow;
++
++ if (qlen < (netdev_max_backlog >> 1))
++ return false;
++
++ sd = this_cpu_ptr(&softnet_data);
++
++ rcu_read_lock();
++ fl = rcu_dereference(sd->flow_limit);
++ if (fl) {
++ new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
++ old_flow = fl->history[fl->history_head];
++ fl->history[fl->history_head] = new_flow;
++
++ fl->history_head++;
++ fl->history_head &= FLOW_LIMIT_HISTORY - 1;
++
++ if (likely(fl->buckets[old_flow]))
++ fl->buckets[old_flow]--;
++
++ if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
++ fl->count++;
++ rcu_read_unlock();
++ return true;
++ }
++ }
++ rcu_read_unlock();
++#endif
++ return false;
++}
++
++/*
++ * enqueue_to_backlog is called to queue an skb to a per CPU backlog
++ * queue (may be a remote CPU queue).
++ */
++static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
++ unsigned int *qtail)
++{
++ struct softnet_data *sd;
++ unsigned long flags;
++ unsigned int qlen;
++
++ sd = &per_cpu(softnet_data, cpu);
++
++ local_irq_save(flags);
++
++ rps_lock(sd);
++ if (!netif_running(skb->dev))
++ goto drop;
++ qlen = skb_queue_len(&sd->input_pkt_queue);
++ if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
++ if (qlen) {
++enqueue:
++ __skb_queue_tail(&sd->input_pkt_queue, skb);
++ input_queue_tail_incr_save(sd, qtail);
++ rps_unlock(sd);
++ local_irq_restore(flags);
++ return NET_RX_SUCCESS;
++ }
++
++ /* Schedule NAPI for backlog device.
++ * We can use a non-atomic operation since we own the queue lock.
++ */
++ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
++ if (!rps_ipi_queued(sd))
++ ____napi_schedule(sd, &sd->backlog);
++ }
++ goto enqueue;
++ }
++
++drop:
++ sd->dropped++;
++ rps_unlock(sd);
++
++ local_irq_restore(flags);
++
++ atomic_long_inc(&skb->dev->rx_dropped);
++ kfree_skb(skb);
++ return NET_RX_DROP;
++}
++
++static int netif_rx_internal(struct sk_buff *skb)
++{
++ int ret;
++
++ net_timestamp_check(netdev_tstamp_prequeue, skb);
++
++ trace_netif_rx(skb);
++#ifdef CONFIG_RPS
++ if (static_key_false(&rps_needed)) {
++ struct rps_dev_flow voidflow, *rflow = &voidflow;
++ int cpu;
++
++ preempt_disable();
++ rcu_read_lock();
++
++ cpu = get_rps_cpu(skb->dev, skb, &rflow);
++ if (cpu < 0)
++ cpu = smp_processor_id();
++
++ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
++
++ rcu_read_unlock();
++ preempt_enable();
++ } else
++#endif
++ {
++ unsigned int qtail;
++ ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
++ put_cpu();
++ }
++ return ret;
++}
++
++/**
++ * netif_rx - post buffer to the network code
++ * @skb: buffer to post
++ *
++ * This function receives a packet from a device driver and queues it for
++ * the upper (protocol) levels to process. It always succeeds. The buffer
++ * may be dropped during processing for congestion control or by the
++ * protocol layers.
++ *
++ * return values:
++ * NET_RX_SUCCESS (no congestion)
++ * NET_RX_DROP (packet was dropped)
++ *
++ */
++
++int netif_rx(struct sk_buff *skb)
++{
++ trace_netif_rx_entry(skb);
++
++ return netif_rx_internal(skb);
++}
++EXPORT_SYMBOL(netif_rx);
++
++int netif_rx_ni(struct sk_buff *skb)
++{
++ int err;
++
++ trace_netif_rx_ni_entry(skb);
++
++ preempt_disable();
++ err = netif_rx_internal(skb);
++ if (local_softirq_pending())
++ do_softirq();
++ preempt_enable();
++
++ return err;
++}
++EXPORT_SYMBOL(netif_rx_ni);
++
++static void net_tx_action(struct softirq_action *h)
++{
++ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
++
++ if (sd->completion_queue) {
++ struct sk_buff *clist;
++
++ local_irq_disable();
++ clist = sd->completion_queue;
++ sd->completion_queue = NULL;
++ local_irq_enable();
++
++ while (clist) {
++ struct sk_buff *skb = clist;
++ clist = clist->next;
++
++ WARN_ON(atomic_read(&skb->users));
++ if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
++ trace_consume_skb(skb);
++ else
++ trace_kfree_skb(skb, net_tx_action);
++ __kfree_skb(skb);
++ }
++ }
++
++ if (sd->output_queue) {
++ struct Qdisc *head;
++
++ local_irq_disable();
++ head = sd->output_queue;
++ sd->output_queue = NULL;
++ sd->output_queue_tailp = &sd->output_queue;
++ local_irq_enable();
++
++ while (head) {
++ struct Qdisc *q = head;
++ spinlock_t *root_lock;
++
++ head = head->next_sched;
++
++ root_lock = qdisc_lock(q);
++ if (spin_trylock(root_lock)) {
++ smp_mb__before_atomic();
++ clear_bit(__QDISC_STATE_SCHED,
++ &q->state);
++ qdisc_run(q);
++ spin_unlock(root_lock);
++ } else {
++ if (!test_bit(__QDISC_STATE_DEACTIVATED,
++ &q->state)) {
++ __netif_reschedule(q);
++ } else {
++ smp_mb__before_atomic();
++ clear_bit(__QDISC_STATE_SCHED,
++ &q->state);
++ }
++ }
++ }
++ }
++}
++
++#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
++ (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
++/* This hook is defined here for ATM LANE */
++int (*br_fdb_test_addr_hook)(struct net_device *dev,
++ unsigned char *addr) __read_mostly;
++EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
++#endif
++
++#ifdef CONFIG_NET_CLS_ACT
++/* TODO: Maybe we should just force sch_ingress to be compiled in
++ * when CONFIG_NET_CLS_ACT is? Otherwise we currently pay for some
++ * useless instructions (a compare and two extra stores) when the
++ * ingress scheduler is off but CONFIG_NET_CLS_ACT is on.
++ * NOTE: This doesn't stop any functionality; if you don't have
++ * the ingress scheduler, you just can't add policies on ingress.
++ */
++static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
++{
++ struct net_device *dev = skb->dev;
++ u32 ttl = G_TC_RTTL(skb->tc_verd);
++ int result = TC_ACT_OK;
++ struct Qdisc *q;
++
++ if (unlikely(MAX_RED_LOOP < ttl++)) {
++ net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
++ skb->skb_iif, dev->ifindex);
++ return TC_ACT_SHOT;
++ }
++
++ skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
++ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
++
++ q = rcu_dereference(rxq->qdisc);
++ if (q != &noop_qdisc) {
++ spin_lock(qdisc_lock(q));
++ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
++ result = qdisc_enqueue_root(skb, q);
++ spin_unlock(qdisc_lock(q));
++ }
++
++ return result;
++}
++
++static inline struct sk_buff *handle_ing(struct sk_buff *skb,
++ struct packet_type **pt_prev,
++ int *ret, struct net_device *orig_dev)
++{
++ struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
++
++ if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
++ return skb;
++
++ if (*pt_prev) {
++ *ret = deliver_skb(skb, *pt_prev, orig_dev);
++ *pt_prev = NULL;
++ }
++
++ switch (ing_filter(skb, rxq)) {
++ case TC_ACT_SHOT:
++ case TC_ACT_STOLEN:
++ kfree_skb(skb);
++ return NULL;
++ }
++
++ return skb;
++}
++#endif
++
++/**
++ * netdev_rx_handler_register - register receive handler
++ * @dev: device to register a handler for
++ * @rx_handler: receive handler to register
++ * @rx_handler_data: data pointer that is used by rx handler
++ *
++ * Register a receive handler for a device. This handler will then be
++ * called from __netif_receive_skb. A negative errno code is returned
++ * on a failure.
++ *
++ * The caller must hold the rtnl_mutex.
++ *
++ * For a general description of rx_handler, see enum rx_handler_result.
++ */
++int netdev_rx_handler_register(struct net_device *dev,
++ rx_handler_func_t *rx_handler,
++ void *rx_handler_data)
++{
++ ASSERT_RTNL();
++
++ if (dev->rx_handler)
++ return -EBUSY;
++
++ /* Note: rx_handler_data must be set before rx_handler */
++ rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
++ rcu_assign_pointer(dev->rx_handler, rx_handler);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
++
++/**
++ * netdev_rx_handler_unregister - unregister receive handler
++ * @dev: device to unregister a handler from
++ *
++ * Unregister a receive handler from a device.
++ *
++ * The caller must hold the rtnl_mutex.
++ */
++void netdev_rx_handler_unregister(struct net_device *dev)
++{
++
++ ASSERT_RTNL();
++ RCU_INIT_POINTER(dev->rx_handler, NULL);
++ /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
++ * section is guaranteed to see a non-NULL rx_handler_data
++ * as well.
++ */
++ synchronize_net();
++ RCU_INIT_POINTER(dev->rx_handler_data, NULL);
++}
++EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
++
++/*
++ * Limit the use of PFMEMALLOC reserves to those protocols that implement
++ * the special handling of PFMEMALLOC skbs.
++ */
++static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
++{
++ switch (skb->protocol) {
++ case htons(ETH_P_ARP):
++ case htons(ETH_P_IP):
++ case htons(ETH_P_IPV6):
++ case htons(ETH_P_8021Q):
++ case htons(ETH_P_8021AD):
++ return true;
++ default:
++ return false;
++ }
++}
++
++static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
++{
++ struct packet_type *ptype, *pt_prev;
++ rx_handler_func_t *rx_handler;
++ struct net_device *orig_dev;
++ bool deliver_exact = false;
++ int ret = NET_RX_DROP;
++ __be16 type;
++
++ net_timestamp_check(!netdev_tstamp_prequeue, skb);
++
++ trace_netif_receive_skb(skb);
++
++ orig_dev = skb->dev;
++
++ skb_reset_network_header(skb);
++ if (!skb_transport_header_was_set(skb))
++ skb_reset_transport_header(skb);
++ skb_reset_mac_len(skb);
++
++ pt_prev = NULL;
++
++another_round:
++ skb->skb_iif = skb->dev->ifindex;
++
++ __this_cpu_inc(softnet_data.processed);
++
++ if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
++ skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
++ skb = skb_vlan_untag(skb);
++ if (unlikely(!skb))
++ goto out;
++ }
++
++#ifdef CONFIG_NET_CLS_ACT
++ if (skb->tc_verd & TC_NCLS) {
++ skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
++ goto ncls;
++ }
++#endif
++
++ if (pfmemalloc)
++ goto skip_taps;
++
++ list_for_each_entry_rcu(ptype, &ptype_all, list) {
++ if (pt_prev)
++ ret = deliver_skb(skb, pt_prev, orig_dev);
++ pt_prev = ptype;
++ }
++
++ list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
++ if (pt_prev)
++ ret = deliver_skb(skb, pt_prev, orig_dev);
++ pt_prev = ptype;
++ }
++
++skip_taps:
++#ifdef CONFIG_NET_CLS_ACT
++ if (static_key_false(&ingress_needed)) {
++ skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
++ if (!skb)
++ goto out;
++ }
++
++ skb->tc_verd = 0;
++ncls:
++#endif
++ if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
++ goto drop;
++
++ if (skb_vlan_tag_present(skb)) {
++ if (pt_prev) {
++ ret = deliver_skb(skb, pt_prev, orig_dev);
++ pt_prev = NULL;
++ }
++ if (vlan_do_receive(&skb))
++ goto another_round;
++ else if (unlikely(!skb))
++ goto out;
++ }
++
++ rx_handler = rcu_dereference(skb->dev->rx_handler);
++ if (rx_handler) {
++ if (pt_prev) {
++ ret = deliver_skb(skb, pt_prev, orig_dev);
++ pt_prev = NULL;
++ }
++ switch (rx_handler(&skb)) {
++ case RX_HANDLER_CONSUMED:
++ ret = NET_RX_SUCCESS;
++ goto out;
++ case RX_HANDLER_ANOTHER:
++ goto another_round;
++ case RX_HANDLER_EXACT:
++ deliver_exact = true;
++ case RX_HANDLER_PASS:
++ break;
++ default:
++ BUG();
++ }
++ }
++
++ if (unlikely(skb_vlan_tag_present(skb))) {
++ if (skb_vlan_tag_get_id(skb))
++ skb->pkt_type = PACKET_OTHERHOST;
++ /* Note: we might in the future use prio bits
++ * and set skb->priority like in vlan_do_receive()
++ * For the time being, just ignore Priority Code Point
++ */
++ skb->vlan_tci = 0;
++ }
++
++ type = skb->protocol;
++
++ /* deliver only exact match when indicated */
++ if (likely(!deliver_exact)) {
++ deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
++ &ptype_base[ntohs(type) &
++ PTYPE_HASH_MASK]);
++ }
++
++ deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
++ &orig_dev->ptype_specific);
++
++ if (unlikely(skb->dev != orig_dev)) {
++ deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
++ &skb->dev->ptype_specific);
++ }
++
++ if (pt_prev) {
++ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
++ goto drop;
++ else
++ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
++ } else {
++drop:
++ atomic_long_inc(&skb->dev->rx_dropped);
++ kfree_skb(skb);
++ /* Jamal, now you will not be able to escape explaining
++ * to me how you were going to use this. :-)
++ */
++ ret = NET_RX_DROP;
++ }
++
++out:
++ return ret;
++}
++
++static int __netif_receive_skb(struct sk_buff *skb)
++{
++ int ret;
++
++ if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
++ unsigned long pflags = current->flags;
++
++ /*
++ * PFMEMALLOC skbs are special, they should
++ * - be delivered to SOCK_MEMALLOC sockets only
++ * - stay away from userspace
++ * - have bounded memory usage
++ *
++ * Use PF_MEMALLOC as this saves us from propagating the allocation
++ * context down to all allocation sites.
++ */
++ current->flags |= PF_MEMALLOC;
++ ret = __netif_receive_skb_core(skb, true);
++ tsk_restore_flags(current, pflags, PF_MEMALLOC);
++ } else
++ ret = __netif_receive_skb_core(skb, false);
++
++ return ret;
++}
++
++static int netif_receive_skb_internal(struct sk_buff *skb)
++{
++ int ret;
++
++ net_timestamp_check(netdev_tstamp_prequeue, skb);
++
++ if (skb_defer_rx_timestamp(skb))
++ return NET_RX_SUCCESS;
++
++ rcu_read_lock();
++
++#ifdef CONFIG_RPS
++ if (static_key_false(&rps_needed)) {
++ struct rps_dev_flow voidflow, *rflow = &voidflow;
++ int cpu = get_rps_cpu(skb->dev, skb, &rflow);
++
++ if (cpu >= 0) {
++ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
++ rcu_read_unlock();
++ return ret;
++ }
++ }
++#endif
++ ret = __netif_receive_skb(skb);
++ rcu_read_unlock();
++ return ret;
++}
++
++/**
++ * netif_receive_skb - process receive buffer from network
++ * @skb: buffer to process
++ *
++ * netif_receive_skb() is the main receive data processing function.
++ * It always succeeds. The buffer may be dropped during processing
++ * for congestion control or by the protocol layers.
++ *
++ * This function may only be called from softirq context and interrupts
++ * should be enabled.
++ *
++ * Return values (usually ignored):
++ * NET_RX_SUCCESS: no congestion
++ * NET_RX_DROP: packet was dropped
++ */
++int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
++{
++ trace_netif_receive_skb_entry(skb);
++
++ return netif_receive_skb_internal(skb);
++}
++EXPORT_SYMBOL(netif_receive_skb_sk);
++
++/* Network device is going away, flush any packets still pending
++ * Called with irqs disabled.
++ */
++static void flush_backlog(void *arg)
++{
++ struct net_device *dev = arg;
++ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
++ struct sk_buff *skb, *tmp;
++
++ rps_lock(sd);
++ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
++ if (skb->dev == dev) {
++ __skb_unlink(skb, &sd->input_pkt_queue);
++ kfree_skb(skb);
++ input_queue_head_incr(sd);
++ }
++ }
++ rps_unlock(sd);
++
++ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
++ if (skb->dev == dev) {
++ __skb_unlink(skb, &sd->process_queue);
++ kfree_skb(skb);
++ input_queue_head_incr(sd);
++ }
++ }
++}
++
++static int napi_gro_complete(struct sk_buff *skb)
++{
++ struct packet_offload *ptype;
++ __be16 type = skb->protocol;
++ struct list_head *head = &offload_base;
++ int err = -ENOENT;
++
++ BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
++
++ if (NAPI_GRO_CB(skb)->count == 1) {
++ skb_shinfo(skb)->gso_size = 0;
++ goto out;
++ }
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(ptype, head, list) {
++ if (ptype->type != type || !ptype->callbacks.gro_complete)
++ continue;
++
++ err = ptype->callbacks.gro_complete(skb, 0);
++ break;
++ }
++ rcu_read_unlock();
++
++ if (err) {
++ WARN_ON(&ptype->list == head);
++ kfree_skb(skb);
++ return NET_RX_SUCCESS;
++ }
++
++out:
++ return netif_receive_skb_internal(skb);
++}
++
++/* napi->gro_list contains packets ordered by age, with the
++ * youngest packets at its head.
++ * Complete skbs in reverse order to reduce latencies.
++ */
++void napi_gro_flush(struct napi_struct *napi, bool flush_old)
++{
++ struct sk_buff *skb, *prev = NULL;
++
++ /* scan list and build reverse chain */
++ for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
++ skb->prev = prev;
++ prev = skb;
++ }
++
++ for (skb = prev; skb; skb = prev) {
++ skb->next = NULL;
++
++ if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
++ return;
++
++ prev = skb->prev;
++ napi_gro_complete(skb);
++ napi->gro_count--;
++ }
++
++ napi->gro_list = NULL;
++}
++EXPORT_SYMBOL(napi_gro_flush);
++
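++/* Walk napi->gro_list and flag which held packets could belong to the
++ * same flow as @skb by comparing the flow hash, device, vlan tag and
++ * MAC header; protocol gro_receive callbacks then use the same_flow flag.
++ */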
++static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
++{
++ struct sk_buff *p;
++ unsigned int maclen = skb->dev->hard_header_len;
++ u32 hash = skb_get_hash_raw(skb);
++
++ for (p = napi->gro_list; p; p = p->next) {
++ unsigned long diffs;
++
++ NAPI_GRO_CB(p)->flush = 0;
++
++ if (hash != skb_get_hash_raw(p)) {
++ NAPI_GRO_CB(p)->same_flow = 0;
++ continue;
++ }
++
++ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
++ diffs |= p->vlan_tci ^ skb->vlan_tci;
++ if (maclen == ETH_HLEN)
++ diffs |= compare_ether_header(skb_mac_header(p),
++ skb_mac_header(skb));
++ else if (!diffs)
++ diffs = memcmp(skb_mac_header(p),
++ skb_mac_header(skb),
++ maclen);
++ NAPI_GRO_CB(p)->same_flow = !diffs;
++ }
++}
++
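++/* Reset the GRO offsets for a freshly received skb and, when all data
++ * sits in the first (non-highmem) page fragment, cache a direct pointer
++ * to it in frag0 so the GRO fast path can read headers without pulling.
++ */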
++static void skb_gro_reset_offset(struct sk_buff *skb)
++{
++ const struct skb_shared_info *pinfo = skb_shinfo(skb);
++ const skb_frag_t *frag0 = &pinfo->frags[0];
++
++ NAPI_GRO_CB(skb)->data_offset = 0;
++ NAPI_GRO_CB(skb)->frag0 = NULL;
++ NAPI_GRO_CB(skb)->frag0_len = 0;
++
++ if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
++ pinfo->nr_frags &&
++ !PageHighMem(skb_frag_page(frag0))) {
++ NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
++ NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
++ }
++}
++
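++/* Copy @grow bytes from the frag0 area into the skb's linear data and
++ * shrink (or drop) the first page fragment accordingly, for callers
++ * that need header bytes in the linear area.
++ */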
++static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
++{
++ struct skb_shared_info *pinfo = skb_shinfo(skb);
++
++ BUG_ON(skb->end - skb->tail < grow);
++
++ memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
++
++ skb->data_len -= grow;
++ skb->tail += grow;
++
++ pinfo->frags[0].page_offset += grow;
++ skb_frag_size_sub(&pinfo->frags[0], grow);
++
++ if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
++ skb_frag_unref(skb, 0);
++ memmove(pinfo->frags, pinfo->frags + 1,
++ --pinfo->nr_frags * sizeof(pinfo->frags[0]));
++ }
++}
++
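++/* Core GRO entry point: try to merge @skb into a flow already held on
++ * napi->gro_list via the matching packet_offload gro_receive callback,
++ * otherwise hold it on the list (GRO_HELD) or hand it back for normal
++ * delivery (GRO_NORMAL).
++ */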
++static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
++{
++ struct sk_buff **pp = NULL;
++ struct packet_offload *ptype;
++ __be16 type = skb->protocol;
++ struct list_head *head = &offload_base;
++ int same_flow;
++ enum gro_result ret;
++ int grow;
++
++ if (!(skb->dev->features & NETIF_F_GRO))
++ goto normal;
++
++ if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
++ goto normal;
++
++ gro_list_prepare(napi, skb);
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(ptype, head, list) {
++ if (ptype->type != type || !ptype->callbacks.gro_receive)
++ continue;
++
++ skb_set_network_header(skb, skb_gro_offset(skb));
++ skb_reset_mac_len(skb);
++ NAPI_GRO_CB(skb)->same_flow = 0;
++ NAPI_GRO_CB(skb)->flush = 0;
++ NAPI_GRO_CB(skb)->free = 0;
++ NAPI_GRO_CB(skb)->udp_mark = 0;
++ NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
++
++ /* Setup for GRO checksum validation */
++ switch (skb->ip_summed) {
++ case CHECKSUM_COMPLETE:
++ NAPI_GRO_CB(skb)->csum = skb->csum;
++ NAPI_GRO_CB(skb)->csum_valid = 1;
++ NAPI_GRO_CB(skb)->csum_cnt = 0;
++ break;
++ case CHECKSUM_UNNECESSARY:
++ NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
++ NAPI_GRO_CB(skb)->csum_valid = 0;
++ break;
++ default:
++ NAPI_GRO_CB(skb)->csum_cnt = 0;
++ NAPI_GRO_CB(skb)->csum_valid = 0;
++ }
++
++ pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
++ break;
++ }
++ rcu_read_unlock();
++
++ if (&ptype->list == head)
++ goto normal;
++
++ same_flow = NAPI_GRO_CB(skb)->same_flow;
++ ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
++
++ if (pp) {
++ struct sk_buff *nskb = *pp;
++
++ *pp = nskb->next;
++ nskb->next = NULL;
++ napi_gro_complete(nskb);
++ napi->gro_count--;
++ }
++
++ if (same_flow)
++ goto ok;
++
++ if (NAPI_GRO_CB(skb)->flush)
++ goto normal;
++
++ if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
++ struct sk_buff *nskb = napi->gro_list;
++
++ /* locate the end of the list to select the 'oldest' flow */
++ while (nskb->next) {
++ pp = &nskb->next;
++ nskb = *pp;
++ }
++ *pp = NULL;
++ nskb->next = NULL;
++ napi_gro_complete(nskb);
++ } else {
++ napi->gro_count++;
++ }
++ NAPI_GRO_CB(skb)->count = 1;
++ NAPI_GRO_CB(skb)->age = jiffies;
++ NAPI_GRO_CB(skb)->last = skb;
++ skb_shinfo(skb)->gso_size = skb_gro_len(skb);
++ skb->next = napi->gro_list;
++ napi->gro_list = skb;
++ ret = GRO_HELD;
++
++pull:
++ grow = skb_gro_offset(skb) - skb_headlen(skb);
++ if (grow > 0)
++ gro_pull_from_frag0(skb, grow);
++ok:
++ return ret;
++
++normal:
++ ret = GRO_NORMAL;
++ goto pull;
++}
++
++struct packet_offload *gro_find_receive_by_type(__be16 type)
++{
++ struct list_head *offload_head = &offload_base;
++ struct packet_offload *ptype;
++
++ list_for_each_entry_rcu(ptype, offload_head, list) {
++ if (ptype->type != type || !ptype->callbacks.gro_receive)
++ continue;
++ return ptype;
++ }
++ return NULL;
++}
++EXPORT_SYMBOL(gro_find_receive_by_type);
++
++struct packet_offload *gro_find_complete_by_type(__be16 type)
++{
++ struct list_head *offload_head = &offload_base;
++ struct packet_offload *ptype;
++
++ list_for_each_entry_rcu(ptype, offload_head, list) {
++ if (ptype->type != type || !ptype->callbacks.gro_complete)
++ continue;
++ return ptype;
++ }
++ return NULL;
++}
++EXPORT_SYMBOL(gro_find_complete_by_type);
++
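++/* Finish a packet after dev_gro_receive(): pass GRO_NORMAL packets up
++ * the stack, free dropped or merged-and-freed ones, and leave held or
++ * merged packets alone.
++ */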
++static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
++{
++ switch (ret) {
++ case GRO_NORMAL:
++ if (netif_receive_skb_internal(skb))
++ ret = GRO_DROP;
++ break;
++
++ case GRO_DROP:
++ kfree_skb(skb);
++ break;
++
++ case GRO_MERGED_FREE:
++ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
++ kmem_cache_free(skbuff_head_cache, skb);
++ else
++ __kfree_skb(skb);
++ break;
++
++ case GRO_HELD:
++ case GRO_MERGED:
++ break;
++ }
++
++ return ret;
++}
++
++gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
++{
++ trace_napi_gro_receive_entry(skb);
++
++ skb_gro_reset_offset(skb);
++
++ return napi_skb_finish(dev_gro_receive(napi, skb), skb);
++}
++EXPORT_SYMBOL(napi_gro_receive);
++
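++/* Recycle a skb handed out by napi_get_frags(): strip its data and
++ * metadata so the driver can reuse it for the next packet (pfmemalloc
++ * skbs are simply consumed instead).
++ */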
++static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
++{
++ if (unlikely(skb->pfmemalloc)) {
++ consume_skb(skb);
++ return;
++ }
++ __skb_pull(skb, skb_headlen(skb));
++ /* restore the reserve we had after netdev_alloc_skb_ip_align() */
++ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
++ skb->vlan_tci = 0;
++ skb->dev = napi->dev;
++ skb->skb_iif = 0;
++ skb->encapsulation = 0;
++ skb_shinfo(skb)->gso_type = 0;
++ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
++
++ napi->skb = skb;
++}
++
++struct sk_buff *napi_get_frags(struct napi_struct *napi)
++{
++ struct sk_buff *skb = napi->skb;
++
++ if (!skb) {
++ skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
++ napi->skb = skb;
++ }
++ return skb;
++}
++EXPORT_SYMBOL(napi_get_frags);
++
++static gro_result_t napi_frags_finish(struct napi_struct *napi,
++ struct sk_buff *skb,
++ gro_result_t ret)
++{
++ switch (ret) {
++ case GRO_NORMAL:
++ case GRO_HELD:
++ __skb_push(skb, ETH_HLEN);
++ skb->protocol = eth_type_trans(skb, skb->dev);
++ if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
++ ret = GRO_DROP;
++ break;
++
++ case GRO_DROP:
++ case GRO_MERGED_FREE:
++ napi_reuse_skb(napi, skb);
++ break;
++
++ case GRO_MERGED:
++ break;
++ }
++
++ return ret;
++}
++
++/* Upper GRO stack assumes network header starts at gro_offset=0
++ * Drivers could call both napi_gro_frags() and napi_gro_receive()
++ * We copy ethernet header into skb->data to have a common layout.
++ */
++static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
++{
++ struct sk_buff *skb = napi->skb;
++ const struct ethhdr *eth;
++ unsigned int hlen = sizeof(*eth);
++
++ napi->skb = NULL;
++
++ skb_reset_mac_header(skb);
++ skb_gro_reset_offset(skb);
++
++ eth = skb_gro_header_fast(skb, 0);
++ if (unlikely(skb_gro_header_hard(skb, hlen))) {
++ eth = skb_gro_header_slow(skb, hlen, 0);
++ if (unlikely(!eth)) {
++ napi_reuse_skb(napi, skb);
++ return NULL;
++ }
++ } else {
++ gro_pull_from_frag0(skb, hlen);
++ NAPI_GRO_CB(skb)->frag0 += hlen;
++ NAPI_GRO_CB(skb)->frag0_len -= hlen;
++ }
++ __skb_pull(skb, hlen);
++
++ /*
++ * This works because the only protocols we care about don't require
++ * special handling.
++ * We'll fix it up properly in napi_frags_finish()
++ */
++ skb->protocol = eth->h_proto;
++
++ return skb;
++}
++
++gro_result_t napi_gro_frags(struct napi_struct *napi)
++{
++ struct sk_buff *skb = napi_frags_skb(napi);
++
++ if (!skb)
++ return GRO_DROP;
++
++ trace_napi_gro_frags_entry(skb);
++
++ return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
++}
++EXPORT_SYMBOL(napi_gro_frags);
++
++/* Compute the checksum from gro_offset and return the folded value
++ * after adding in any pseudo checksum.
++ */
++__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
++{
++ __wsum wsum;
++ __sum16 sum;
++
++ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
++
++ /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
++ sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
++ if (likely(!sum)) {
++ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
++ !skb->csum_complete_sw)
++ netdev_rx_csum_fault(skb->dev);
++ }
++
++ NAPI_GRO_CB(skb)->csum = wsum;
++ NAPI_GRO_CB(skb)->csum_valid = 1;
++
++ return sum;
++}
++EXPORT_SYMBOL(__skb_gro_checksum_complete);
++
++/*
++ * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
++ * Note: called with local irq disabled, but exits with local irq enabled.
++ */
++static void net_rps_action_and_irq_enable(struct softnet_data *sd)
++{
++#ifdef CONFIG_RPS
++ struct softnet_data *remsd = sd->rps_ipi_list;
++
++ if (remsd) {
++ sd->rps_ipi_list = NULL;
++
++ local_irq_enable();
++
++ /* Send pending IPI's to kick RPS processing on remote cpus. */
++ while (remsd) {
++ struct softnet_data *next = remsd->rps_ipi_next;
++
++ if (cpu_online(remsd->cpu))
++ smp_call_function_single_async(remsd->cpu,
++ &remsd->csd);
++ remsd = next;
++ }
++ } else
++#endif
++ local_irq_enable();
++}
++
++static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
++{
++#ifdef CONFIG_RPS
++ return sd->rps_ipi_list != NULL;
++#else
++ return false;
++#endif
++}
++
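++/* NAPI poll handler for the per-CPU backlog device: drain the process
++ * queue, refilling it from input_pkt_queue under rps_lock, until the
++ * quota is reached or both queues are empty.
++ */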
++static int process_backlog(struct napi_struct *napi, int quota)
++{
++ int work = 0;
++ struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
++
++ /* Check if we have pending IPIs; it is better to send them now
++ * rather than waiting for net_rx_action() to end.
++ */
++ if (sd_has_rps_ipi_waiting(sd)) {
++ local_irq_disable();
++ net_rps_action_and_irq_enable(sd);
++ }
++
++ napi->weight = weight_p;
++ local_irq_disable();
++ while (1) {
++ struct sk_buff *skb;
++
++ while ((skb = __skb_dequeue(&sd->process_queue))) {
++ rcu_read_lock();
++ local_irq_enable();
++ __netif_receive_skb(skb);
++ rcu_read_unlock();
++ local_irq_disable();
++ input_queue_head_incr(sd);
++ if (++work >= quota) {
++ local_irq_enable();
++ return work;
++ }
++ }
++
++ rps_lock(sd);
++ if (skb_queue_empty(&sd->input_pkt_queue)) {
++ /*
++ * Inline a custom version of __napi_complete().
++ * Only the current CPU owns and manipulates this NAPI,
++ * and NAPI_STATE_SCHED is the only possible flag set
++ * on the backlog.
++ * We can use a plain write instead of clear_bit(),
++ * and we don't need an smp_mb() memory barrier.
++ */
++ napi->state = 0;
++ rps_unlock(sd);
++
++ break;
++ }
++
++ skb_queue_splice_tail_init(&sd->input_pkt_queue,
++ &sd->process_queue);
++ rps_unlock(sd);
++ }
++ local_irq_enable();
++
++ return work;
++}
++
++/**
++ * __napi_schedule - schedule for receive
++ * @n: entry to schedule
++ *
++ * The entry's receive function will be scheduled to run.
++ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
++ */
++void __napi_schedule(struct napi_struct *n)
++{
++ unsigned long flags;
++
++ local_irq_save(flags);
++ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(__napi_schedule);
++
++/**
++ * __napi_schedule_irqoff - schedule for receive
++ * @n: entry to schedule
++ *
++ * Variant of __napi_schedule() assuming hard irqs are masked
++ */
++void __napi_schedule_irqoff(struct napi_struct *n)
++{
++ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
++}
++EXPORT_SYMBOL(__napi_schedule_irqoff);
++
++void __napi_complete(struct napi_struct *n)
++{
++ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
++
++ list_del_init(&n->poll_list);
++ smp_mb__before_atomic();
++ clear_bit(NAPI_STATE_SCHED, &n->state);
++}
++EXPORT_SYMBOL(__napi_complete);
++
++void napi_complete_done(struct napi_struct *n, int work_done)
++{
++ unsigned long flags;
++
++ /*
++ * Don't let NAPI dequeue from the CPU poll list,
++ * just in case it's running on a different CPU.
++ */
++ if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
++ return;
++
++ if (n->gro_list) {
++ unsigned long timeout = 0;
++
++ if (work_done)
++ timeout = n->dev->gro_flush_timeout;
++
++ if (timeout)
++ hrtimer_start(&n->timer, ns_to_ktime(timeout),
++ HRTIMER_MODE_REL_PINNED);
++ else
++ napi_gro_flush(n, false);
++ }
++ if (likely(list_empty(&n->poll_list))) {
++ WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
++ } else {
++ /* If n->poll_list is not empty, we need to mask irqs */
++ local_irq_save(flags);
++ __napi_complete(n);
++ local_irq_restore(flags);
++ }
++}
++EXPORT_SYMBOL(napi_complete_done);
++
++/* must be called under rcu_read_lock(), as we don't take a reference */
++struct napi_struct *napi_by_id(unsigned int napi_id)
++{
++ unsigned int hash = napi_id % HASH_SIZE(napi_hash);
++ struct napi_struct *napi;
++
++ hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
++ if (napi->napi_id == napi_id)
++ return napi;
++
++ return NULL;
++}
++EXPORT_SYMBOL_GPL(napi_by_id);
++
++void napi_hash_add(struct napi_struct *napi)
++{
++ if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
++
++ spin_lock(&napi_hash_lock);
++
++ /* 0 is not a valid id; we also skip an id that is already taken.
++ * We expect both events to be extremely rare.
++ */
++ napi->napi_id = 0;
++ while (!napi->napi_id) {
++ napi->napi_id = ++napi_gen_id;
++ if (napi_by_id(napi->napi_id))
++ napi->napi_id = 0;
++ }
++
++ hlist_add_head_rcu(&napi->napi_hash_node,
++ &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
++
++ spin_unlock(&napi_hash_lock);
++ }
++}
++EXPORT_SYMBOL_GPL(napi_hash_add);
++
++/* Warning: the caller is responsible for making sure an RCU grace
++ * period has elapsed before freeing the memory containing @napi.
++ */
++void napi_hash_del(struct napi_struct *napi)
++{
++ spin_lock(&napi_hash_lock);
++
++ if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
++ hlist_del_rcu(&napi->napi_hash_node);
++
++ spin_unlock(&napi_hash_lock);
++}
++EXPORT_SYMBOL_GPL(napi_hash_del);
++
++static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
++{
++ struct napi_struct *napi;
++
++ napi = container_of(timer, struct napi_struct, timer);
++ if (napi->gro_list)
++ napi_schedule(napi);
++
++ return HRTIMER_NORESTART;
++}
++
++void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
++ int (*poll)(struct napi_struct *, int), int weight)
++{
++ INIT_LIST_HEAD(&napi->poll_list);
++ hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
++ napi->timer.function = napi_watchdog;
++ napi->gro_count = 0;
++ napi->gro_list = NULL;
++ napi->skb = NULL;
++ napi->poll = poll;
++ if (weight > NAPI_POLL_WEIGHT)
++ pr_err_once("netif_napi_add() called with weight %d on device %s\n",
++ weight, dev->name);
++ napi->weight = weight;
++ list_add(&napi->dev_list, &dev->napi_list);
++ napi->dev = dev;
++#ifdef CONFIG_NETPOLL
++ spin_lock_init(&napi->poll_lock);
++ napi->poll_owner = -1;
++#endif
++ set_bit(NAPI_STATE_SCHED, &napi->state);
++}
++EXPORT_SYMBOL(netif_napi_add);
++
++void napi_disable(struct napi_struct *n)
++{
++ might_sleep();
++ set_bit(NAPI_STATE_DISABLE, &n->state);
++
++ while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
++ msleep(1);
++
++ hrtimer_cancel(&n->timer);
++
++ clear_bit(NAPI_STATE_DISABLE, &n->state);
++}
++EXPORT_SYMBOL(napi_disable);
++
++void netif_napi_del(struct napi_struct *napi)
++{
++ list_del_init(&napi->dev_list);
++ napi_free_frags(napi);
++
++ kfree_skb_list(napi->gro_list);
++ napi->gro_list = NULL;
++ napi->gro_count = 0;
++}
++EXPORT_SYMBOL(netif_napi_del);
++
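++/* Run one NAPI instance's ->poll() for up to its weight; if the whole
++ * weight was consumed the instance is put back on @repoll for another
++ * pass. Returns the amount of work done.
++ */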
++static int napi_poll(struct napi_struct *n, struct list_head *repoll)
++{
++ void *have;
++ int work, weight;
++
++ list_del_init(&n->poll_list);
++
++ have = netpoll_poll_lock(n);
++
++ weight = n->weight;
++
++ /* This NAPI_STATE_SCHED test is for avoiding a race
++ * with netpoll's poll_napi(). Only the entity which
++ * obtains the lock and sees NAPI_STATE_SCHED set will
++ * actually make the ->poll() call. Therefore we avoid
++ * accidentally calling ->poll() when NAPI is not scheduled.
++ */
++ work = 0;
++ if (test_bit(NAPI_STATE_SCHED, &n->state)) {
++ work = n->poll(n, weight);
++ trace_napi_poll(n);
++ }
++
++ WARN_ON_ONCE(work > weight);
++
++ if (likely(work < weight))
++ goto out_unlock;
++
++ /* Drivers must not modify the NAPI state if they
++ * consume the entire weight. In such cases this code
++ * still "owns" the NAPI instance and therefore can
++ * move the instance around on the list at-will.
++ */
++ if (unlikely(napi_disable_pending(n))) {
++ napi_complete(n);
++ goto out_unlock;
++ }
++
++ if (n->gro_list) {
++ /* Flush packets that are too old.
++ * If HZ < 1000, flush all packets.
++ */
++ napi_gro_flush(n, HZ >= 1000);
++ }
++
++ /* Some drivers may have called napi_schedule
++ * prior to exhausting their budget.
++ */
++ if (unlikely(!list_empty(&n->poll_list))) {
++ pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
++ n->dev ? n->dev->name : "backlog");
++ goto out_unlock;
++ }
++
++ list_add_tail(&n->poll_list, repoll);
++
++out_unlock:
++ netpoll_poll_unlock(have);
++
++ return work;
++}
++
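++/* NET_RX_SOFTIRQ handler: splice off the per-CPU poll list and service
++ * the queued NAPI instances until netdev_budget or the 2 jiffies time
++ * limit is exhausted, then reschedule the softirq if work remains.
++ */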
++static void net_rx_action(struct softirq_action *h)
++{
++ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
++ unsigned long time_limit = jiffies + 2;
++ int budget = netdev_budget;
++ LIST_HEAD(list);
++ LIST_HEAD(repoll);
++
++ local_irq_disable();
++ list_splice_init(&sd->poll_list, &list);
++ local_irq_enable();
++
++ for (;;) {
++ struct napi_struct *n;
++
++ if (list_empty(&list)) {
++ if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
++ return;
++ break;
++ }
++
++ n = list_first_entry(&list, struct napi_struct, poll_list);
++ budget -= napi_poll(n, &repoll);
++
++ /* If the softirq window is exhausted then punt.
++ * Allow this to run for 2 jiffies, which allows
++ * an average latency of 1.5/HZ.
++ */
++ if (unlikely(budget <= 0 ||
++ time_after_eq(jiffies, time_limit))) {
++ sd->time_squeeze++;
++ break;
++ }
++ }
++
++ local_irq_disable();
++
++ list_splice_tail_init(&sd->poll_list, &list);
++ list_splice_tail(&repoll, &list);
++ list_splice(&list, &sd->poll_list);
++ if (!list_empty(&sd->poll_list))
++ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
++
++ net_rps_action_and_irq_enable(sd);
++}
++
++struct netdev_adjacent {
++ struct net_device *dev;
++
++ /* upper master flag, there can only be one master device per list */
++ bool master;
++
++ /* counter for the number of times this device was added to us */
++ u16 ref_nr;
++
++ /* private field for the users */
++ void *private;
++
++ struct list_head list;
++ struct rcu_head rcu;
++};
++
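++/* Look up the netdev_adjacent entry for @adj_dev on one of @dev's
++ * adjacency lists, or return NULL if the devices are not linked there.
++ */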
++static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
++ struct net_device *adj_dev,
++ struct list_head *adj_list)
++{
++ struct netdev_adjacent *adj;
++
++ list_for_each_entry(adj, adj_list, list) {
++ if (adj->dev == adj_dev)
++ return adj;
++ }
++ return NULL;
++}
++
++/**
++ * netdev_has_upper_dev - Check if device is linked to an upper device
++ * @dev: device
++ * @upper_dev: upper device to check
++ *
++ * Find out if a device is linked to the specified upper device and return
++ * true in case it is. Note that this checks only the immediate upper device;
++ * it does not search through the complete stack of devices. The caller must
++ * hold the RTNL lock.
++ */
++bool netdev_has_upper_dev(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ ASSERT_RTNL();
++
++ return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
++}
++EXPORT_SYMBOL(netdev_has_upper_dev);
++
++/**
++ * netdev_has_any_upper_dev - Check if device is linked to some device
++ * @dev: device
++ *
++ * Find out if a device is linked to an upper device and return true in case
++ * it is. The caller must hold the RTNL lock.
++ */
++static bool netdev_has_any_upper_dev(struct net_device *dev)
++{
++ ASSERT_RTNL();
++
++ return !list_empty(&dev->all_adj_list.upper);
++}
++
++/**
++ * netdev_master_upper_dev_get - Get master upper device
++ * @dev: device
++ *
++ * Find a master upper device and return pointer to it or NULL in case
++ * it's not there. The caller must hold the RTNL lock.
++ */
++struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
++{
++ struct netdev_adjacent *upper;
++
++ ASSERT_RTNL();
++
++ if (list_empty(&dev->adj_list.upper))
++ return NULL;
++
++ upper = list_first_entry(&dev->adj_list.upper,
++ struct netdev_adjacent, list);
++ if (likely(upper->master))
++ return upper->dev;
++ return NULL;
++}
++EXPORT_SYMBOL(netdev_master_upper_dev_get);
++
++void *netdev_adjacent_get_private(struct list_head *adj_list)
++{
++ struct netdev_adjacent *adj;
++
++ adj = list_entry(adj_list, struct netdev_adjacent, list);
++
++ return adj->private;
++}
++EXPORT_SYMBOL(netdev_adjacent_get_private);
++
++/**
++ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
++ * @dev: device
++ * @iter: list_head ** of the current position
++ *
++ * Gets the next device from the dev's upper list, starting from iter
++ * position. The caller must hold RCU read lock.
++ */
++struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
++ struct list_head **iter)
++{
++ struct netdev_adjacent *upper;
++
++ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
++
++ upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
++
++ if (&upper->list == &dev->adj_list.upper)
++ return NULL;
++
++ *iter = &upper->list;
++
++ return upper->dev;
++}
++EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
++
++/**
++ * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
++ * @dev: device
++ * @iter: list_head ** of the current position
++ *
++ * Gets the next device from the dev's upper list, starting from iter
++ * position. The caller must hold RCU read lock.
++ */
++struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
++ struct list_head **iter)
++{
++ struct netdev_adjacent *upper;
++
++ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
++
++ upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
++
++ if (&upper->list == &dev->all_adj_list.upper)
++ return NULL;
++
++ *iter = &upper->list;
++
++ return upper->dev;
++}
++EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
++
++/**
++ * netdev_lower_get_next_private - Get the next ->private from the
++ * lower neighbour list
++ * @dev: device
++ * @iter: list_head ** of the current position
++ *
++ * Gets the next netdev_adjacent->private from the dev's lower neighbour
++ * list, starting from iter position. The caller must either hold the
++ * RTNL lock or its own locking that guarantees that the neighbour lower
++ * list will remain unchanged.
++ */
++void *netdev_lower_get_next_private(struct net_device *dev,
++ struct list_head **iter)
++{
++ struct netdev_adjacent *lower;
++
++ lower = list_entry(*iter, struct netdev_adjacent, list);
++
++ if (&lower->list == &dev->adj_list.lower)
++ return NULL;
++
++ *iter = lower->list.next;
++
++ return lower->private;
++}
++EXPORT_SYMBOL(netdev_lower_get_next_private);
++
++/**
++ * netdev_lower_get_next_private_rcu - Get the next ->private from the
++ * lower neighbour list, RCU
++ * variant
++ * @dev: device
++ * @iter: list_head ** of the current position
++ *
++ * Gets the next netdev_adjacent->private from the dev's lower neighbour
++ * list, starting from iter position. The caller must hold RCU read lock.
++ */
++void *netdev_lower_get_next_private_rcu(struct net_device *dev,
++ struct list_head **iter)
++{
++ struct netdev_adjacent *lower;
++
++ WARN_ON_ONCE(!rcu_read_lock_held());
++
++ lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
++
++ if (&lower->list == &dev->adj_list.lower)
++ return NULL;
++
++ *iter = &lower->list;
++
++ return lower->private;
++}
++EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
++
++/**
++ * netdev_lower_get_next - Get the next device from the lower neighbour
++ * list
++ * @dev: device
++ * @iter: list_head ** of the current position
++ *
++ * Gets the next netdev_adjacent from the dev's lower neighbour
++ * list, starting from iter position. The caller must hold the RTNL lock
++ * or its own locking that guarantees that the neighbour lower
++ * list will remain unchanged.
++ */
++void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
++{
++ struct netdev_adjacent *lower;
++
++ lower = list_entry((*iter)->next, struct netdev_adjacent, list);
++
++ if (&lower->list == &dev->adj_list.lower)
++ return NULL;
++
++ *iter = &lower->list;
++
++ return lower->dev;
++}
++EXPORT_SYMBOL(netdev_lower_get_next);
++
++/**
++ * netdev_lower_get_first_private_rcu - Get the first ->private from the
++ * lower neighbour list, RCU
++ * variant
++ * @dev: device
++ *
++ * Gets the first netdev_adjacent->private from the dev's lower neighbour
++ * list. The caller must hold RCU read lock.
++ */
++void *netdev_lower_get_first_private_rcu(struct net_device *dev)
++{
++ struct netdev_adjacent *lower;
++
++ lower = list_first_or_null_rcu(&dev->adj_list.lower,
++ struct netdev_adjacent, list);
++ if (lower)
++ return lower->private;
++ return NULL;
++}
++EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
++
++/**
++ * netdev_master_upper_dev_get_rcu - Get master upper device
++ * @dev: device
++ *
++ * Find a master upper device and return pointer to it or NULL in case
++ * it's not there. The caller must hold the RCU read lock.
++ */
++struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
++{
++ struct netdev_adjacent *upper;
++
++ upper = list_first_or_null_rcu(&dev->adj_list.upper,
++ struct netdev_adjacent, list);
++ if (upper && likely(upper->master))
++ return upper->dev;
++ return NULL;
++}
++EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
++
++static int netdev_adjacent_sysfs_add(struct net_device *dev,
++ struct net_device *adj_dev,
++ struct list_head *dev_list)
++{
++ char linkname[IFNAMSIZ+7];
++ sprintf(linkname, dev_list == &dev->adj_list.upper ?
++ "upper_%s" : "lower_%s", adj_dev->name);
++ return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
++ linkname);
++}
++static void netdev_adjacent_sysfs_del(struct net_device *dev,
++ char *name,
++ struct list_head *dev_list)
++{
++ char linkname[IFNAMSIZ+7];
++ sprintf(linkname, dev_list == &dev->adj_list.upper ?
++ "upper_%s" : "lower_%s", name);
++ sysfs_remove_link(&(dev->dev.kobj), linkname);
++}
++
++static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
++ struct net_device *adj_dev,
++ struct list_head *dev_list)
++{
++ return (dev_list == &dev->adj_list.upper ||
++ dev_list == &dev->adj_list.lower) &&
++ net_eq(dev_net(dev), dev_net(adj_dev));
++}
++
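++/* Link @adj_dev onto one of @dev's adjacency lists (or just bump the
++ * reference count if it is already there), creating the corresponding
++ * sysfs links for neighbour entries and for a master device.
++ */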
++static int __netdev_adjacent_dev_insert(struct net_device *dev,
++ struct net_device *adj_dev,
++ struct list_head *dev_list,
++ void *private, bool master)
++{
++ struct netdev_adjacent *adj;
++ int ret;
++
++ adj = __netdev_find_adj(dev, adj_dev, dev_list);
++
++ if (adj) {
++ adj->ref_nr++;
++ return 0;
++ }
++
++ adj = kmalloc(sizeof(*adj), GFP_KERNEL);
++ if (!adj)
++ return -ENOMEM;
++
++ adj->dev = adj_dev;
++ adj->master = master;
++ adj->ref_nr = 1;
++ adj->private = private;
++ dev_hold(adj_dev);
++
++ pr_debug("dev_hold for %s, because of link added from %s to %s\n",
++ adj_dev->name, dev->name, adj_dev->name);
++
++ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
++ ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
++ if (ret)
++ goto free_adj;
++ }
++
++ /* Ensure that master link is always the first item in list. */
++ if (master) {
++ ret = sysfs_create_link(&(dev->dev.kobj),
++ &(adj_dev->dev.kobj), "master");
++ if (ret)
++ goto remove_symlinks;
++
++ list_add_rcu(&adj->list, dev_list);
++ } else {
++ list_add_tail_rcu(&adj->list, dev_list);
++ }
++
++ return 0;
++
++remove_symlinks:
++ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
++ netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
++free_adj:
++ kfree(adj);
++ dev_put(adj_dev);
++
++ return ret;
++}
++
++static void __netdev_adjacent_dev_remove(struct net_device *dev,
++ struct net_device *adj_dev,
++ struct list_head *dev_list)
++{
++ struct netdev_adjacent *adj;
++
++ adj = __netdev_find_adj(dev, adj_dev, dev_list);
++
++ if (!adj) {
++ pr_err("tried to remove device %s from %s\n",
++ dev->name, adj_dev->name);
++ BUG();
++ }
++
++ if (adj->ref_nr > 1) {
++ pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
++ adj->ref_nr-1);
++ adj->ref_nr--;
++ return;
++ }
++
++ if (adj->master)
++ sysfs_remove_link(&(dev->dev.kobj), "master");
++
++ if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
++ netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
++
++ list_del_rcu(&adj->list);
++ pr_debug("dev_put for %s, because link removed from %s to %s\n",
++ adj_dev->name, dev->name, adj_dev->name);
++ dev_put(adj_dev);
++ kfree_rcu(adj, rcu);
++}
++
++static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
++ struct net_device *upper_dev,
++ struct list_head *up_list,
++ struct list_head *down_list,
++ void *private, bool master)
++{
++ int ret;
++
++ ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
++ master);
++ if (ret)
++ return ret;
++
++ ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
++ false);
++ if (ret) {
++ __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int __netdev_adjacent_dev_link(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ return __netdev_adjacent_dev_link_lists(dev, upper_dev,
++ &dev->all_adj_list.upper,
++ &upper_dev->all_adj_list.lower,
++ NULL, false);
++}
++
++static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
++ struct net_device *upper_dev,
++ struct list_head *up_list,
++ struct list_head *down_list)
++{
++ __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
++ __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
++}
++
++static void __netdev_adjacent_dev_unlink(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
++ &dev->all_adj_list.upper,
++ &upper_dev->all_adj_list.lower);
++}
++
++static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
++ struct net_device *upper_dev,
++ void *private, bool master)
++{
++ int ret = __netdev_adjacent_dev_link(dev, upper_dev);
++
++ if (ret)
++ return ret;
++
++ ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
++ &dev->adj_list.upper,
++ &upper_dev->adj_list.lower,
++ private, master);
++ if (ret) {
++ __netdev_adjacent_dev_unlink(dev, upper_dev);
++ return ret;
++ }
++
++ return 0;
++}
++
++static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ __netdev_adjacent_dev_unlink(dev, upper_dev);
++ __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
++ &dev->adj_list.upper,
++ &upper_dev->adj_list.lower);
++}
++
++static int __netdev_upper_dev_link(struct net_device *dev,
++ struct net_device *upper_dev, bool master,
++ void *private)
++{
++ struct netdev_adjacent *i, *j, *to_i, *to_j;
++ int ret = 0;
++
++ ASSERT_RTNL();
++
++ if (dev == upper_dev)
++ return -EBUSY;
++
++ /* To prevent loops, check if dev is not upper device to upper_dev. */
++ if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
++ return -EBUSY;
++
++ if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
++ return -EEXIST;
++
++ if (master && netdev_master_upper_dev_get(dev))
++ return -EBUSY;
++
++ ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
++ master);
++ if (ret)
++ return ret;
++
++ /* Now that we have linked these devs, make all of upper_dev's
++ * all_adj_list.upper visible to every dev's all_adj_list.lower and
++ * vice versa, and don't forget the devices themselves. All of these
++ * links are non-neighbours.
++ */
++ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
++ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
++ pr_debug("Interlinking %s with %s, non-neighbour\n",
++ i->dev->name, j->dev->name);
++ ret = __netdev_adjacent_dev_link(i->dev, j->dev);
++ if (ret)
++ goto rollback_mesh;
++ }
++ }
++
++ /* add dev to every upper_dev's upper device */
++ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
++ pr_debug("linking %s's upper device %s with %s\n",
++ upper_dev->name, i->dev->name, dev->name);
++ ret = __netdev_adjacent_dev_link(dev, i->dev);
++ if (ret)
++ goto rollback_upper_mesh;
++ }
++
++ /* add upper_dev to every dev's lower device */
++ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
++ pr_debug("linking %s's lower device %s with %s\n", dev->name,
++ i->dev->name, upper_dev->name);
++ ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
++ if (ret)
++ goto rollback_lower_mesh;
++ }
++
++ call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
++ return 0;
++
++rollback_lower_mesh:
++ to_i = i;
++ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
++ if (i == to_i)
++ break;
++ __netdev_adjacent_dev_unlink(i->dev, upper_dev);
++ }
++
++ i = NULL;
++
++rollback_upper_mesh:
++ to_i = i;
++ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
++ if (i == to_i)
++ break;
++ __netdev_adjacent_dev_unlink(dev, i->dev);
++ }
++
++ i = j = NULL;
++
++rollback_mesh:
++ to_i = i;
++ to_j = j;
++ list_for_each_entry(i, &dev->all_adj_list.lower, list) {
++ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
++ if (i == to_i && j == to_j)
++ break;
++ __netdev_adjacent_dev_unlink(i->dev, j->dev);
++ }
++ if (i == to_i)
++ break;
++ }
++
++ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
++
++ return ret;
++}
++
++/**
++ * netdev_upper_dev_link - Add a link to the upper device
++ * @dev: device
++ * @upper_dev: new upper device
++ *
++ * Adds a link to device which is upper to this one. The caller must hold
++ * the RTNL lock. On a failure a negative errno code is returned.
++ * On success the reference counts are adjusted and the function
++ * returns zero.
++ */
++int netdev_upper_dev_link(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
++}
++EXPORT_SYMBOL(netdev_upper_dev_link);
++
++/**
++ * netdev_master_upper_dev_link - Add a master link to the upper device
++ * @dev: device
++ * @upper_dev: new upper device
++ *
++ * Adds a link to device which is upper to this one. In this case, only
++ * one master upper device can be linked, although other non-master devices
++ * might be linked as well. The caller must hold the RTNL lock.
++ * On a failure a negative errno code is returned. On success the reference
++ * counts are adjusted and the function returns zero.
++ */
++int netdev_master_upper_dev_link(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
++}
++EXPORT_SYMBOL(netdev_master_upper_dev_link);
++
++int netdev_master_upper_dev_link_private(struct net_device *dev,
++ struct net_device *upper_dev,
++ void *private)
++{
++ return __netdev_upper_dev_link(dev, upper_dev, true, private);
++}
++EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
++
++/**
++ * netdev_upper_dev_unlink - Removes a link to upper device
++ * @dev: device
++ * @upper_dev: upper device to unlink
++ *
++ * Removes a link to device which is upper to this one. The caller must hold
++ * the RTNL lock.
++ */
++void netdev_upper_dev_unlink(struct net_device *dev,
++ struct net_device *upper_dev)
++{
++ struct netdev_adjacent *i, *j;
++ ASSERT_RTNL();
++
++ __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
++
++ /* Here is the tricky part. We must remove all dev's lower
++ * devices from all upper_dev's upper devices and vice
++ * versa, to maintain the graph relationship.
++ */
++ list_for_each_entry(i, &dev->all_adj_list.lower, list)
++ list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
++ __netdev_adjacent_dev_unlink(i->dev, j->dev);
++
++ /* Also remove the devices themselves from each other's
++ * lower/upper device lists.
++ */
++ list_for_each_entry(i, &dev->all_adj_list.lower, list)
++ __netdev_adjacent_dev_unlink(i->dev, upper_dev);
++
++ list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
++ __netdev_adjacent_dev_unlink(dev, i->dev);
++
++ call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
++}
++EXPORT_SYMBOL(netdev_upper_dev_unlink);
++
++/**
++ * netdev_bonding_info_change - Dispatch event about slave change
++ * @dev: device
++ * @bonding_info: info to dispatch
++ *
++ * Send NETDEV_BONDING_INFO to netdev notifiers with info.
++ * The caller must hold the RTNL lock.
++ */
++void netdev_bonding_info_change(struct net_device *dev,
++ struct netdev_bonding_info *bonding_info)
++{
++ struct netdev_notifier_bonding_info info;
++
++ memcpy(&info.bonding_info, bonding_info,
++ sizeof(struct netdev_bonding_info));
++ call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
++ &info.info);
++}
++EXPORT_SYMBOL(netdev_bonding_info_change);
++
++static void netdev_adjacent_add_links(struct net_device *dev)
++{
++ struct netdev_adjacent *iter;
++
++ struct net *net = dev_net(dev);
++
++ list_for_each_entry(iter, &dev->adj_list.upper, list) {
++ if (!net_eq(net,dev_net(iter->dev)))
++ continue;
++ netdev_adjacent_sysfs_add(iter->dev, dev,
++ &iter->dev->adj_list.lower);
++ netdev_adjacent_sysfs_add(dev, iter->dev,
++ &dev->adj_list.upper);
++ }
++
++ list_for_each_entry(iter, &dev->adj_list.lower, list) {
++ if (!net_eq(net,dev_net(iter->dev)))
++ continue;
++ netdev_adjacent_sysfs_add(iter->dev, dev,
++ &iter->dev->adj_list.upper);
++ netdev_adjacent_sysfs_add(dev, iter->dev,
++ &dev->adj_list.lower);
++ }
++}
++
++static void netdev_adjacent_del_links(struct net_device *dev)
++{
++ struct netdev_adjacent *iter;
++
++ struct net *net = dev_net(dev);
++
++ list_for_each_entry(iter, &dev->adj_list.upper, list) {
++ if (!net_eq(net,dev_net(iter->dev)))
++ continue;
++ netdev_adjacent_sysfs_del(iter->dev, dev->name,
++ &iter->dev->adj_list.lower);
++ netdev_adjacent_sysfs_del(dev, iter->dev->name,
++ &dev->adj_list.upper);
++ }
++
++ list_for_each_entry(iter, &dev->adj_list.lower, list) {
++ if (!net_eq(net,dev_net(iter->dev)))
++ continue;
++ netdev_adjacent_sysfs_del(iter->dev, dev->name,
++ &iter->dev->adj_list.upper);
++ netdev_adjacent_sysfs_del(dev, iter->dev->name,
++ &dev->adj_list.lower);
++ }
++}
++
++void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
++{
++ struct netdev_adjacent *iter;
++
++ struct net *net = dev_net(dev);
++
++ list_for_each_entry(iter, &dev->adj_list.upper, list) {
++ if (!net_eq(net,dev_net(iter->dev)))
++ continue;
++ netdev_adjacent_sysfs_del(iter->dev, oldname,
++ &iter->dev->adj_list.lower);
++ netdev_adjacent_sysfs_add(iter->dev, dev,
++ &iter->dev->adj_list.lower);
++ }
++
++ list_for_each_entry(iter, &dev->adj_list.lower, list) {
++ if (!net_eq(net,dev_net(iter->dev)))
++ continue;
++ netdev_adjacent_sysfs_del(iter->dev, oldname,
++ &iter->dev->adj_list.upper);
++ netdev_adjacent_sysfs_add(iter->dev, dev,
++ &iter->dev->adj_list.upper);
++ }
++}
++
++void *netdev_lower_dev_get_private(struct net_device *dev,
++ struct net_device *lower_dev)
++{
++ struct netdev_adjacent *lower;
++
++ if (!lower_dev)
++ return NULL;
++ lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
++ if (!lower)
++ return NULL;
++
++ return lower->private;
++}
++EXPORT_SYMBOL(netdev_lower_dev_get_private);
++
++
++int dev_get_nest_level(struct net_device *dev,
++ bool (*type_check)(struct net_device *dev))
++{
++ struct net_device *lower = NULL;
++ struct list_head *iter;
++ int max_nest = -1;
++ int nest;
++
++ ASSERT_RTNL();
++
++ netdev_for_each_lower_dev(dev, lower, iter) {
++ nest = dev_get_nest_level(lower, type_check);
++ if (max_nest < nest)
++ max_nest = nest;
++ }
++
++ if (type_check(dev))
++ max_nest++;
++
++ return max_nest;
++}
++EXPORT_SYMBOL(dev_get_nest_level);
++
++static void dev_change_rx_flags(struct net_device *dev, int flags)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ if (ops->ndo_change_rx_flags)
++ ops->ndo_change_rx_flags(dev, flags);
++}
++
++static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
++{
++ unsigned int old_flags = dev->flags;
++ kuid_t uid;
++ kgid_t gid;
++
++ ASSERT_RTNL();
++
++ dev->flags |= IFF_PROMISC;
++ dev->promiscuity += inc;
++ if (dev->promiscuity == 0) {
++ /*
++ * Avoid overflow.
++ * If inc causes overflow, untouch promisc and return error.
++ */
++ if (inc < 0)
++ dev->flags &= ~IFF_PROMISC;
++ else {
++ dev->promiscuity -= inc;
++ pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
++ dev->name);
++ return -EOVERFLOW;
++ }
++ }
++ if (dev->flags != old_flags) {
++ pr_info("device %s %s promiscuous mode\n",
++ dev->name,
++ dev->flags & IFF_PROMISC ? "entered" : "left");
++ if (audit_enabled) {
++ current_uid_gid(&uid, &gid);
++ audit_log(current->audit_context, GFP_ATOMIC,
++ AUDIT_ANOM_PROMISCUOUS,
++ "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
++ dev->name, (dev->flags & IFF_PROMISC),
++ (old_flags & IFF_PROMISC),
++ from_kuid(&init_user_ns, audit_get_loginuid(current)),
++ from_kuid(&init_user_ns, uid),
++ from_kgid(&init_user_ns, gid),
++ audit_get_sessionid(current));
++ }
++
++ dev_change_rx_flags(dev, IFF_PROMISC);
++ }
++ if (notify)
++ __dev_notify_flags(dev, old_flags, IFF_PROMISC);
++ return 0;
++}
++
++/**
++ * dev_set_promiscuity - update promiscuity count on a device
++ * @dev: device
++ * @inc: modifier
++ *
++ * Add or remove promiscuity from a device. While the count in the device
++ * remains above zero the interface remains promiscuous. Once it hits zero
++ * the device reverts back to normal filtering operation. A negative inc
++ * value is used to drop promiscuity on the device.
++ * Return 0 if successful or a negative errno code on error.
++ */
++int dev_set_promiscuity(struct net_device *dev, int inc)
++{
++ unsigned int old_flags = dev->flags;
++ int err;
++
++ err = __dev_set_promiscuity(dev, inc, true);
++ if (err < 0)
++ return err;
++ if (dev->flags != old_flags)
++ dev_set_rx_mode(dev);
++ return err;
++}
++EXPORT_SYMBOL(dev_set_promiscuity);
++
++static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
++{
++ unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
++
++ ASSERT_RTNL();
++
++ dev->flags |= IFF_ALLMULTI;
++ dev->allmulti += inc;
++ if (dev->allmulti == 0) {
++ /*
++ * Avoid overflow.
++ * If inc causes overflow, untouch allmulti and return error.
++ */
++ if (inc < 0)
++ dev->flags &= ~IFF_ALLMULTI;
++ else {
++ dev->allmulti -= inc;
++ pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
++ dev->name);
++ return -EOVERFLOW;
++ }
++ }
++ if (dev->flags ^ old_flags) {
++ dev_change_rx_flags(dev, IFF_ALLMULTI);
++ dev_set_rx_mode(dev);
++ if (notify)
++ __dev_notify_flags(dev, old_flags,
++ dev->gflags ^ old_gflags);
++ }
++ return 0;
++}
++
++/**
++ * dev_set_allmulti - update allmulti count on a device
++ * @dev: device
++ * @inc: modifier
++ *
++ * Add or remove reception of all multicast frames to a device. While the
++ * count in the device remains above zero the interface remains listening
++ * to all multicast frames. Once it hits zero the device reverts back to normal
++ * filtering operation. A negative @inc value is used to drop the counter
++ * when releasing a resource needing all multicasts.
++ * Return 0 if successful or a negative errno code on error.
++ */
++
++int dev_set_allmulti(struct net_device *dev, int inc)
++{
++ return __dev_set_allmulti(dev, inc, true);
++}
++EXPORT_SYMBOL(dev_set_allmulti);
++
++/*
++ * Upload unicast and multicast address lists to device and
++ * configure RX filtering. When the device doesn't support unicast
++ * filtering it is put in promiscuous mode while unicast addresses
++ * are present.
++ */
++void __dev_set_rx_mode(struct net_device *dev)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ /* dev_open will call this function so the list will stay sane. */
++ if (!(dev->flags&IFF_UP))
++ return;
++
++ if (!netif_device_present(dev))
++ return;
++
++ if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
++ /* Unicast address changes may only happen under the rtnl,
++ * therefore calling __dev_set_promiscuity here is safe.
++ */
++ if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
++ __dev_set_promiscuity(dev, 1, false);
++ dev->uc_promisc = true;
++ } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
++ __dev_set_promiscuity(dev, -1, false);
++ dev->uc_promisc = false;
++ }
++ }
++
++ if (ops->ndo_set_rx_mode)
++ ops->ndo_set_rx_mode(dev);
++}
++
++void dev_set_rx_mode(struct net_device *dev)
++{
++ netif_addr_lock_bh(dev);
++ __dev_set_rx_mode(dev);
++ netif_addr_unlock_bh(dev);
++}
++
++/**
++ * dev_get_flags - get flags reported to userspace
++ * @dev: device
++ *
++ * Get the combination of flag bits exported through APIs to userspace.
++ */
++unsigned int dev_get_flags(const struct net_device *dev)
++{
++ unsigned int flags;
++
++ flags = (dev->flags & ~(IFF_PROMISC |
++ IFF_ALLMULTI |
++ IFF_RUNNING |
++ IFF_LOWER_UP |
++ IFF_DORMANT)) |
++ (dev->gflags & (IFF_PROMISC |
++ IFF_ALLMULTI));
++
++ if (netif_running(dev)) {
++ if (netif_oper_up(dev))
++ flags |= IFF_RUNNING;
++ if (netif_carrier_ok(dev))
++ flags |= IFF_LOWER_UP;
++ if (netif_dormant(dev))
++ flags |= IFF_DORMANT;
++ }
++
++ return flags;
++}
++EXPORT_SYMBOL(dev_get_flags);
++
++int __dev_change_flags(struct net_device *dev, unsigned int flags)
++{
++ unsigned int old_flags = dev->flags;
++ int ret;
++
++ ASSERT_RTNL();
++
++ /*
++ * Set the flags on our device.
++ */
++
++ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
++ IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
++ IFF_AUTOMEDIA)) |
++ (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
++ IFF_ALLMULTI));
++
++ /*
++ * Load in the correct multicast list now the flags have changed.
++ */
++
++ if ((old_flags ^ flags) & IFF_MULTICAST)
++ dev_change_rx_flags(dev, IFF_MULTICAST);
++
++ dev_set_rx_mode(dev);
++
++ /*
++ * Have we downed the interface? We handle IFF_UP ourselves
++ * according to user attempts to set it, rather than blindly
++ * setting it.
++ */
++
++ ret = 0;
++ if ((old_flags ^ flags) & IFF_UP)
++ ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
++
++ if ((flags ^ dev->gflags) & IFF_PROMISC) {
++ int inc = (flags & IFF_PROMISC) ? 1 : -1;
++ unsigned int old_flags = dev->flags;
++
++ dev->gflags ^= IFF_PROMISC;
++
++ if (__dev_set_promiscuity(dev, inc, false) >= 0)
++ if (dev->flags != old_flags)
++ dev_set_rx_mode(dev);
++ }
++
++ /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
++ is important. Some (broken) drivers set IFF_PROMISC, when
++ IFF_ALLMULTI is requested not asking us and not reporting.
++ */
++ if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
++ int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
++
++ dev->gflags ^= IFF_ALLMULTI;
++ __dev_set_allmulti(dev, inc, false);
++ }
++
++ return ret;
++}
++
++void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
++ unsigned int gchanges)
++{
++ unsigned int changes = dev->flags ^ old_flags;
++
++ if (gchanges)
++ rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
++
++ if (changes & IFF_UP) {
++ if (dev->flags & IFF_UP)
++ call_netdevice_notifiers(NETDEV_UP, dev);
++ else
++ call_netdevice_notifiers(NETDEV_DOWN, dev);
++ }
++
++ if (dev->flags & IFF_UP &&
++ (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
++ struct netdev_notifier_change_info change_info;
++
++ change_info.flags_changed = changes;
++ call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
++ &change_info.info);
++ }
++}
++
++/**
++ * dev_change_flags - change device settings
++ * @dev: device
++ * @flags: device state flags
++ *
++ * Change settings on device based on state flags. The flags are
++ * in the userspace exported format.
++ */
++int dev_change_flags(struct net_device *dev, unsigned int flags)
++{
++ int ret;
++ unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
++
++ ret = __dev_change_flags(dev, flags);
++ if (ret < 0)
++ return ret;
++
++ changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
++ __dev_notify_flags(dev, old_flags, changes);
++ return ret;
++}
++EXPORT_SYMBOL(dev_change_flags);
++
++static int __dev_set_mtu(struct net_device *dev, int new_mtu)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ if (ops->ndo_change_mtu)
++ return ops->ndo_change_mtu(dev, new_mtu);
++
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++/**
++ * dev_set_mtu - Change maximum transfer unit
++ * @dev: device
++ * @new_mtu: new transfer unit
++ *
++ * Change the maximum transfer size of the network device.
++ */
++int dev_set_mtu(struct net_device *dev, int new_mtu)
++{
++ int err, orig_mtu;
++
++ if (new_mtu == dev->mtu)
++ return 0;
++
++ /* MTU must be positive. */
++ if (new_mtu < 0)
++ return -EINVAL;
++
++ if (!netif_device_present(dev))
++ return -ENODEV;
++
++ err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
++ err = notifier_to_errno(err);
++ if (err)
++ return err;
++
++ orig_mtu = dev->mtu;
++ err = __dev_set_mtu(dev, new_mtu);
++
++ if (!err) {
++ err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
++ err = notifier_to_errno(err);
++ if (err) {
++ /* setting mtu back and notifying everyone again,
++ * so that they have a chance to revert changes.
++ */
++ __dev_set_mtu(dev, orig_mtu);
++ call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
++ }
++ }
++ return err;
++}
++EXPORT_SYMBOL(dev_set_mtu);
++
++/**
++ * dev_set_group - Change group this device belongs to
++ * @dev: device
++ * @new_group: group this device should belong to
++ */
++void dev_set_group(struct net_device *dev, int new_group)
++{
++ dev->group = new_group;
++}
++EXPORT_SYMBOL(dev_set_group);
++
++/**
++ * dev_set_mac_address - Change Media Access Control Address
++ * @dev: device
++ * @sa: new address
++ *
++ * Change the hardware (MAC) address of the device
++ */
++int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++ int err;
++
++ if (!ops->ndo_set_mac_address)
++ return -EOPNOTSUPP;
++ if (sa->sa_family != dev->type)
++ return -EINVAL;
++ if (!netif_device_present(dev))
++ return -ENODEV;
++ err = ops->ndo_set_mac_address(dev, sa);
++ if (err)
++ return err;
++ dev->addr_assign_type = NET_ADDR_SET;
++ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
++ return 0;
++}
++EXPORT_SYMBOL(dev_set_mac_address);
++
++/**
++ * dev_change_carrier - Change device carrier
++ * @dev: device
++ * @new_carrier: new value
++ *
++ * Change device carrier
++ */
++int dev_change_carrier(struct net_device *dev, bool new_carrier)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ if (!ops->ndo_change_carrier)
++ return -EOPNOTSUPP;
++ if (!netif_device_present(dev))
++ return -ENODEV;
++ return ops->ndo_change_carrier(dev, new_carrier);
++}
++EXPORT_SYMBOL(dev_change_carrier);
++
++/**
++ * dev_get_phys_port_id - Get device physical port ID
++ * @dev: device
++ * @ppid: port ID
++ *
++ * Get device physical port ID
++ */
++int dev_get_phys_port_id(struct net_device *dev,
++ struct netdev_phys_item_id *ppid)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ if (!ops->ndo_get_phys_port_id)
++ return -EOPNOTSUPP;
++ return ops->ndo_get_phys_port_id(dev, ppid);
++}
++EXPORT_SYMBOL(dev_get_phys_port_id);
++
++/**
++ * dev_get_phys_port_name - Get device physical port name
++ * @dev: device
++ * @name: port name
++ *
++ * Get device physical port name
++ */
++int dev_get_phys_port_name(struct net_device *dev,
++ char *name, size_t len)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ if (!ops->ndo_get_phys_port_name)
++ return -EOPNOTSUPP;
++ return ops->ndo_get_phys_port_name(dev, name, len);
++}
++EXPORT_SYMBOL(dev_get_phys_port_name);
++
++/**
++ * dev_new_index - allocate an ifindex
++ * @net: the applicable net namespace
++ *
++ * Returns a suitable unique value for a new device interface
++ * number. The caller must hold the rtnl semaphore or the
++ * dev_base_lock to be sure it remains unique.
++ */
++static int dev_new_index(struct net *net)
++{
++ int ifindex = net->ifindex;
++ for (;;) {
++ if (++ifindex <= 0)
++ ifindex = 1;
++ if (!__dev_get_by_index(net, ifindex))
++ return net->ifindex = ifindex;
++ }
++}
++
++/* Delayed registration/unregisteration */
++static LIST_HEAD(net_todo_list);
++DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
++
++static void net_set_todo(struct net_device *dev)
++{
++ list_add_tail(&dev->todo_list, &net_todo_list);
++ dev_net(dev)->dev_unreg_count++;
++}
++
++static void rollback_registered_many(struct list_head *head)
++{
++ struct net_device *dev, *tmp;
++ LIST_HEAD(close_head);
++
++ BUG_ON(dev_boot_phase);
++ ASSERT_RTNL();
++
++ list_for_each_entry_safe(dev, tmp, head, unreg_list) {
++ /* Some devices call without registering
++ * for initialization unwind. Remove those
++ * devices and proceed with the remaining.
++ */
++ if (dev->reg_state == NETREG_UNINITIALIZED) {
++ pr_debug("unregister_netdevice: device %s/%p never was registered\n",
++ dev->name, dev);
++
++ WARN_ON(1);
++ list_del(&dev->unreg_list);
++ continue;
++ }
++ dev->dismantle = true;
++ BUG_ON(dev->reg_state != NETREG_REGISTERED);
++ }
++
++ /* If device is running, close it first. */
++ list_for_each_entry(dev, head, unreg_list)
++ list_add_tail(&dev->close_list, &close_head);
++ dev_close_many(&close_head, true);
++
++ list_for_each_entry(dev, head, unreg_list) {
++ /* And unlink it from device chain. */
++ unlist_netdevice(dev);
++
++ dev->reg_state = NETREG_UNREGISTERING;
++ on_each_cpu(flush_backlog, dev, 1);
++ }
++
++ synchronize_net();
++
++ list_for_each_entry(dev, head, unreg_list) {
++ struct sk_buff *skb = NULL;
++
++ /* Shutdown queueing discipline. */
++ dev_shutdown(dev);
++
++
++ /* Notify protocols, that we are about to destroy
++ this device. They should clean all the things.
++ */
++ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
++
++ if (!dev->rtnl_link_ops ||
++ dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
++ skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
++ GFP_KERNEL);
++
++ /*
++ * Flush the unicast and multicast chains
++ */
++ dev_uc_flush(dev);
++ dev_mc_flush(dev);
++
++ if (dev->netdev_ops->ndo_uninit)
++ dev->netdev_ops->ndo_uninit(dev);
++
++ if (skb)
++ rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
++
++ /* Notifier chain MUST detach us all upper devices. */
++ WARN_ON(netdev_has_any_upper_dev(dev));
++
++ /* Remove entries from kobject tree */
++ netdev_unregister_kobject(dev);
++#ifdef CONFIG_XPS
++ /* Remove XPS queueing entries */
++ netif_reset_xps_queues_gt(dev, 0);
++#endif
++ }
++
++ synchronize_net();
++
++ list_for_each_entry(dev, head, unreg_list)
++ dev_put(dev);
++}
++
++static void rollback_registered(struct net_device *dev)
++{
++ LIST_HEAD(single);
++
++ list_add(&dev->unreg_list, &single);
++ rollback_registered_many(&single);
++ list_del(&single);
++}
++
++static netdev_features_t netdev_fix_features(struct net_device *dev,
++ netdev_features_t features)
++{
++ /* Fix illegal checksum combinations */
++ if ((features & NETIF_F_HW_CSUM) &&
++ (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
++ netdev_warn(dev, "mixed HW and IP checksum settings.\n");
++ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
++ }
++
++ /* TSO requires that SG is present as well. */
++ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
++ netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
++ features &= ~NETIF_F_ALL_TSO;
++ }
++
++ if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
++ !(features & NETIF_F_IP_CSUM)) {
++ netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
++ features &= ~NETIF_F_TSO;
++ features &= ~NETIF_F_TSO_ECN;
++ }
++
++ if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
++ !(features & NETIF_F_IPV6_CSUM)) {
++ netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
++ features &= ~NETIF_F_TSO6;
++ }
++
++ /* TSO ECN requires that TSO is present as well. */
++ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
++ features &= ~NETIF_F_TSO_ECN;
++
++ /* Software GSO depends on SG. */
++ if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
++ netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
++ features &= ~NETIF_F_GSO;
++ }
++
++ /* UFO needs SG and checksumming */
++ if (features & NETIF_F_UFO) {
++ /* maybe split UFO into V4 and V6? */
++ if (!((features & NETIF_F_GEN_CSUM) ||
++ (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
++ == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
++ netdev_dbg(dev,
++ "Dropping NETIF_F_UFO since no checksum offload features.\n");
++ features &= ~NETIF_F_UFO;
++ }
++
++ if (!(features & NETIF_F_SG)) {
++ netdev_dbg(dev,
++ "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
++ features &= ~NETIF_F_UFO;
++ }
++ }
++
++#ifdef CONFIG_NET_RX_BUSY_POLL
++ if (dev->netdev_ops->ndo_busy_poll)
++ features |= NETIF_F_BUSY_POLL;
++ else
++#endif
++ features &= ~NETIF_F_BUSY_POLL;
++
++ return features;
++}
++
++int __netdev_update_features(struct net_device *dev)
++{
++ netdev_features_t features;
++ int err = 0;
++
++ ASSERT_RTNL();
++
++ features = netdev_get_wanted_features(dev);
++
++ if (dev->netdev_ops->ndo_fix_features)
++ features = dev->netdev_ops->ndo_fix_features(dev, features);
++
++ /* driver might be less strict about feature dependencies */
++ features = netdev_fix_features(dev, features);
++
++ if (dev->features == features)
++ return 0;
++
++ netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
++ &dev->features, &features);
++
++ if (dev->netdev_ops->ndo_set_features)
++ err = dev->netdev_ops->ndo_set_features(dev, features);
++
++ if (unlikely(err < 0)) {
++ netdev_err(dev,
++ "set_features() failed (%d); wanted %pNF, left %pNF\n",
++ err, &features, &dev->features);
++ return -1;
++ }
++
++ if (!err)
++ dev->features = features;
++
++ return 1;
++}
++
++/**
++ * netdev_update_features - recalculate device features
++ * @dev: the device to check
++ *
++ * Recalculate dev->features set and send notifications if it
++ * has changed. Should be called after driver or hardware dependent
++ * conditions might have changed that influence the features.
++ */
++void netdev_update_features(struct net_device *dev)
++{
++ if (__netdev_update_features(dev))
++ netdev_features_change(dev);
++}
++EXPORT_SYMBOL(netdev_update_features);
++
++/**
++ * netdev_change_features - recalculate device features
++ * @dev: the device to check
++ *
++ * Recalculate dev->features set and send notifications even
++ * if they have not changed. Should be called instead of
++ * netdev_update_features() if also dev->vlan_features might
++ * have changed to allow the changes to be propagated to stacked
++ * VLAN devices.
++ */
++void netdev_change_features(struct net_device *dev)
++{
++ __netdev_update_features(dev);
++ netdev_features_change(dev);
++}
++EXPORT_SYMBOL(netdev_change_features);
++
++/**
++ * netif_stacked_transfer_operstate - transfer operstate
++ * @rootdev: the root or lower level device to transfer state from
++ * @dev: the device to transfer operstate to
++ *
++ * Transfer operational state from root to device. This is normally
++ * called when a stacking relationship exists between the root
++ * device and the device (a leaf device).
++ */
++void netif_stacked_transfer_operstate(const struct net_device *rootdev,
++ struct net_device *dev)
++{
++ if (rootdev->operstate == IF_OPER_DORMANT)
++ netif_dormant_on(dev);
++ else
++ netif_dormant_off(dev);
++
++ if (netif_carrier_ok(rootdev)) {
++ if (!netif_carrier_ok(dev))
++ netif_carrier_on(dev);
++ } else {
++ if (netif_carrier_ok(dev))
++ netif_carrier_off(dev);
++ }
++}
++EXPORT_SYMBOL(netif_stacked_transfer_operstate);
++
++#ifdef CONFIG_SYSFS
++static int netif_alloc_rx_queues(struct net_device *dev)
++{
++ unsigned int i, count = dev->num_rx_queues;
++ struct netdev_rx_queue *rx;
++ size_t sz = count * sizeof(*rx);
++
++ BUG_ON(count < 1);
++
++ rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
++ if (!rx) {
++ rx = vzalloc(sz);
++ if (!rx)
++ return -ENOMEM;
++ }
++ dev->_rx = rx;
++
++ for (i = 0; i < count; i++)
++ rx[i].dev = dev;
++ return 0;
++}
++#endif
++
++static void netdev_init_one_queue(struct net_device *dev,
++ struct netdev_queue *queue, void *_unused)
++{
++ /* Initialize queue lock */
++ spin_lock_init(&queue->_xmit_lock);
++ netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
++ queue->xmit_lock_owner = -1;
++ netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
++ queue->dev = dev;
++#ifdef CONFIG_BQL
++ dql_init(&queue->dql, HZ);
++#endif
++}
++
++static void netif_free_tx_queues(struct net_device *dev)
++{
++ kvfree(dev->_tx);
++}
++
++static int netif_alloc_netdev_queues(struct net_device *dev)
++{
++ unsigned int count = dev->num_tx_queues;
++ struct netdev_queue *tx;
++ size_t sz = count * sizeof(*tx);
++
++ if (count < 1 || count > 0xffff)
++ return -EINVAL;
++
++ tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
++ if (!tx) {
++ tx = vzalloc(sz);
++ if (!tx)
++ return -ENOMEM;
++ }
++ dev->_tx = tx;
++
++ netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
++ spin_lock_init(&dev->tx_global_lock);
++
++ return 0;
++}
++
++/**
++ * register_netdevice - register a network device
++ * @dev: device to register
++ *
++ * Take a completed network device structure and add it to the kernel
++ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
++ * chain. 0 is returned on success. A negative errno code is returned
++ * on a failure to set up the device, or if the name is a duplicate.
++ *
++ * Callers must hold the rtnl semaphore. You may want
++ * register_netdev() instead of this.
++ *
++ * BUGS:
++ * The locking appears insufficient to guarantee two parallel registers
++ * will not get the same name.
++ */
++
++int register_netdevice(struct net_device *dev)
++{
++ int ret;
++ struct net *net = dev_net(dev);
++
++ BUG_ON(dev_boot_phase);
++ ASSERT_RTNL();
++
++ might_sleep();
++
++ /* When net_device's are persistent, this will be fatal. */
++ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
++ BUG_ON(!net);
++
++ spin_lock_init(&dev->addr_list_lock);
++ netdev_set_addr_lockdep_class(dev);
++
++ ret = dev_get_valid_name(net, dev, dev->name);
++ if (ret < 0)
++ goto out;
++
++ /* Init, if this function is available */
++ if (dev->netdev_ops->ndo_init) {
++ ret = dev->netdev_ops->ndo_init(dev);
++ if (ret) {
++ if (ret > 0)
++ ret = -EIO;
++ goto out;
++ }
++ }
++
++ if (((dev->hw_features | dev->features) &
++ NETIF_F_HW_VLAN_CTAG_FILTER) &&
++ (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
++ !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
++ netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
++ ret = -EINVAL;
++ goto err_uninit;
++ }
++
++ ret = -EBUSY;
++ if (!dev->ifindex)
++ dev->ifindex = dev_new_index(net);
++ else if (__dev_get_by_index(net, dev->ifindex))
++ goto err_uninit;
++
++ /* Transfer changeable features to wanted_features and enable
++ * software offloads (GSO and GRO).
++ */
++ dev->hw_features |= NETIF_F_SOFT_FEATURES;
++ dev->features |= NETIF_F_SOFT_FEATURES;
++ dev->wanted_features = dev->features & dev->hw_features;
++
++ if (!(dev->flags & IFF_LOOPBACK)) {
++ dev->hw_features |= NETIF_F_NOCACHE_COPY;
++ }
++
++ /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
++ */
++ dev->vlan_features |= NETIF_F_HIGHDMA;
++
++ /* Make NETIF_F_SG inheritable to tunnel devices.
++ */
++ dev->hw_enc_features |= NETIF_F_SG;
++
++ /* Make NETIF_F_SG inheritable to MPLS.
++ */
++ dev->mpls_features |= NETIF_F_SG;
++
++ ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
++ ret = notifier_to_errno(ret);
++ if (ret)
++ goto err_uninit;
++
++ ret = netdev_register_kobject(dev);
++ if (ret)
++ goto err_uninit;
++ dev->reg_state = NETREG_REGISTERED;
++
++ __netdev_update_features(dev);
++
++ /*
++ * Default initial state at registration is that the
++ * device is present.
++ */
++
++ set_bit(__LINK_STATE_PRESENT, &dev->state);
++
++ linkwatch_init_dev(dev);
++
++ dev_init_scheduler(dev);
++ dev_hold(dev);
++ list_netdevice(dev);
++ add_device_randomness(dev->dev_addr, dev->addr_len);
++
++ /* If the device has a permanent device address, the driver should
++ * set dev_addr and also addr_assign_type should be set to
++ * NET_ADDR_PERM (default value).
++ */
++ if (dev->addr_assign_type == NET_ADDR_PERM)
++ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
++
++ /* Notify protocols, that a new device appeared. */
++ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
++ ret = notifier_to_errno(ret);
++ if (ret) {
++ rollback_registered(dev);
++ dev->reg_state = NETREG_UNREGISTERED;
++ }
++ /*
++ * Prevent userspace races by waiting until the network
++ * device is fully setup before sending notifications.
++ */
++ if (!dev->rtnl_link_ops ||
++ dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
++ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
++
++out:
++ return ret;
++
++err_uninit:
++ if (dev->netdev_ops->ndo_uninit)
++ dev->netdev_ops->ndo_uninit(dev);
++ goto out;
++}
++EXPORT_SYMBOL(register_netdevice);
++
++/**
++ * init_dummy_netdev - init a dummy network device for NAPI
++ * @dev: device to init
++ *
++ * This takes a network device structure and initializes the minimum
++ * amount of fields so it can be used to schedule NAPI polls without
++ * registering a full blown interface. This is to be used by drivers
++ * that need to tie several hardware interfaces to a single NAPI
++ * poll scheduler due to HW limitations.
++ */
++int init_dummy_netdev(struct net_device *dev)
++{
++ /* Clear everything. Note we don't initialize spinlocks
++ * as they aren't supposed to be taken by any of the
++ * NAPI code and this dummy netdev is supposed to be
++ * only ever used for NAPI polls
++ */
++ memset(dev, 0, sizeof(struct net_device));
++
++ /* make sure we BUG if trying to hit standard
++ * register/unregister code path
++ */
++ dev->reg_state = NETREG_DUMMY;
++
++ /* NAPI wants this */
++ INIT_LIST_HEAD(&dev->napi_list);
++
++ /* a dummy interface is started by default */
++ set_bit(__LINK_STATE_PRESENT, &dev->state);
++ set_bit(__LINK_STATE_START, &dev->state);
++
++ /* Note: We don't allocate pcpu_refcnt for dummy devices,
++ * because users of this 'device' don't need to change
++ * its refcount.
++ */
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(init_dummy_netdev);
++
++
++/**
++ * register_netdev - register a network device
++ * @dev: device to register
++ *
++ * Take a completed network device structure and add it to the kernel
++ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
++ * chain. 0 is returned on success. A negative errno code is returned
++ * on a failure to set up the device, or if the name is a duplicate.
++ *
++ * This is a wrapper around register_netdevice that takes the rtnl semaphore
++ * and expands the device name if you passed a format string to
++ * alloc_netdev.
++ */
++int register_netdev(struct net_device *dev)
++{
++ int err;
++
++ rtnl_lock();
++ err = register_netdevice(dev);
++ rtnl_unlock();
++ return err;
++}
++EXPORT_SYMBOL(register_netdev);
++
++int netdev_refcnt_read(const struct net_device *dev)
++{
++ int i, refcnt = 0;
++
++ for_each_possible_cpu(i)
++ refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
++ return refcnt;
++}
++EXPORT_SYMBOL(netdev_refcnt_read);
++
++/**
++ * netdev_wait_allrefs - wait until all references are gone.
++ * @dev: target net_device
++ *
++ * This is called when unregistering network devices.
++ *
++ * Any protocol or device that holds a reference should register
++ * for netdevice notification, and cleanup and put back the
++ * reference if they receive an UNREGISTER event.
++ * We can get stuck here if buggy protocols don't correctly
++ * call dev_put.
++ */
++static void netdev_wait_allrefs(struct net_device *dev)
++{
++ unsigned long rebroadcast_time, warning_time;
++ int refcnt;
++
++ linkwatch_forget_dev(dev);
++
++ rebroadcast_time = warning_time = jiffies;
++ refcnt = netdev_refcnt_read(dev);
++
++ while (refcnt != 0) {
++ if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
++ rtnl_lock();
++
++ /* Rebroadcast unregister notification */
++ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
++
++ __rtnl_unlock();
++ rcu_barrier();
++ rtnl_lock();
++
++ call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
++ if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
++ &dev->state)) {
++ /* We must not have linkwatch events
++ * pending on unregister. If this
++ * happens, we simply run the queue
++ * unscheduled, resulting in a noop
++ * for this device.
++ */
++ linkwatch_run_queue();
++ }
++
++ __rtnl_unlock();
++
++ rebroadcast_time = jiffies;
++ }
++
++ msleep(250);
++
++ refcnt = netdev_refcnt_read(dev);
++
++ if (time_after(jiffies, warning_time + 10 * HZ)) {
++ pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
++ dev->name, refcnt);
++ warning_time = jiffies;
++ }
++ }
++}
++
++/* The sequence is:
++ *
++ * rtnl_lock();
++ * ...
++ * register_netdevice(x1);
++ * register_netdevice(x2);
++ * ...
++ * unregister_netdevice(y1);
++ * unregister_netdevice(y2);
++ * ...
++ * rtnl_unlock();
++ * free_netdev(y1);
++ * free_netdev(y2);
++ *
++ * We are invoked by rtnl_unlock().
++ * This allows us to deal with problems:
++ * 1) We can delete sysfs objects which invoke hotplug
++ * without deadlocking with linkwatch via keventd.
++ * 2) Since we run with the RTNL semaphore not held, we can sleep
++ * safely in order to wait for the netdev refcnt to drop to zero.
++ *
++ * We must not return until all unregister events added during
++ * the interval the lock was held have been completed.
++ */
++void netdev_run_todo(void)
++{
++ struct list_head list;
++
++ /* Snapshot list, allow later requests */
++ list_replace_init(&net_todo_list, &list);
++
++ __rtnl_unlock();
++
++
++ /* Wait for rcu callbacks to finish before next phase */
++ if (!list_empty(&list))
++ rcu_barrier();
++
++ while (!list_empty(&list)) {
++ struct net_device *dev
++ = list_first_entry(&list, struct net_device, todo_list);
++ list_del(&dev->todo_list);
++
++ rtnl_lock();
++ call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
++ __rtnl_unlock();
++
++ if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
++ pr_err("network todo '%s' but state %d\n",
++ dev->name, dev->reg_state);
++ dump_stack();
++ continue;
++ }
++
++ dev->reg_state = NETREG_UNREGISTERED;
++
++ netdev_wait_allrefs(dev);
++
++ /* paranoia */
++ BUG_ON(netdev_refcnt_read(dev));
++ BUG_ON(!list_empty(&dev->ptype_all));
++ BUG_ON(!list_empty(&dev->ptype_specific));
++ WARN_ON(rcu_access_pointer(dev->ip_ptr));
++ WARN_ON(rcu_access_pointer(dev->ip6_ptr));
++ WARN_ON(dev->dn_ptr);
++
++ if (dev->destructor)
++ dev->destructor(dev);
++
++ /* Report a network device has been unregistered */
++ rtnl_lock();
++ dev_net(dev)->dev_unreg_count--;
++ __rtnl_unlock();
++ wake_up(&netdev_unregistering_wq);
++
++ /* Free network device */
++ kobject_put(&dev->dev.kobj);
++ }
++}
++
++/* Convert net_device_stats to rtnl_link_stats64. They have the same
++ * fields in the same order, with only the type differing.
++ */
++void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
++ const struct net_device_stats *netdev_stats)
++{
++#if BITS_PER_LONG == 64
++ BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
++ memcpy(stats64, netdev_stats, sizeof(*stats64));
++#else
++ size_t i, n = sizeof(*stats64) / sizeof(u64);
++ const unsigned long *src = (const unsigned long *)netdev_stats;
++ u64 *dst = (u64 *)stats64;
++
++ BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
++ sizeof(*stats64) / sizeof(u64));
++ for (i = 0; i < n; i++)
++ dst[i] = src[i];
++#endif
++}
++EXPORT_SYMBOL(netdev_stats_to_stats64);
++
++/**
++ * dev_get_stats - get network device statistics
++ * @dev: device to get statistics from
++ * @storage: place to store stats
++ *
++ * Get network statistics from device. Return @storage.
++ * The device driver may provide its own method by setting
++ * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
++ * otherwise the internal statistics structure is used.
++ */
++struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
++ struct rtnl_link_stats64 *storage)
++{
++ const struct net_device_ops *ops = dev->netdev_ops;
++
++ if (ops->ndo_get_stats64) {
++ memset(storage, 0, sizeof(*storage));
++ ops->ndo_get_stats64(dev, storage);
++ } else if (ops->ndo_get_stats) {
++ netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
++ } else {
++ netdev_stats_to_stats64(storage, &dev->stats);
++ }
++ storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
++ storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
++ return storage;
++}
++EXPORT_SYMBOL(dev_get_stats);
++
++struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
++{
++ struct netdev_queue *queue = dev_ingress_queue(dev);
++
++#ifdef CONFIG_NET_CLS_ACT
++ if (queue)
++ return queue;
++ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
++ if (!queue)
++ return NULL;
++ netdev_init_one_queue(dev, queue, NULL);
++ RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
++ queue->qdisc_sleeping = &noop_qdisc;
++ rcu_assign_pointer(dev->ingress_queue, queue);
++#endif
++ return queue;
++}
++
++static const struct ethtool_ops default_ethtool_ops;
++
++void netdev_set_default_ethtool_ops(struct net_device *dev,
++ const struct ethtool_ops *ops)
++{
++ if (dev->ethtool_ops == &default_ethtool_ops)
++ dev->ethtool_ops = ops;
++}
++EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
++
++void netdev_freemem(struct net_device *dev)
++{
++ char *addr = (char *)dev - dev->padded;
++
++ kvfree(addr);
++}
++
++/**
++ * alloc_netdev_mqs - allocate network device
++ * @sizeof_priv: size of private data to allocate space for
++ * @name: device name format string
++ * @name_assign_type: origin of device name
++ * @setup: callback to initialize device
++ * @txqs: the number of TX subqueues to allocate
++ * @rxqs: the number of RX subqueues to allocate
++ *
++ * Allocates a struct net_device with private data area for driver use
++ * and performs basic initialization. Also allocates subqueue structs
++ * for each queue on the device.
++ */
++struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
++ unsigned char name_assign_type,
++ void (*setup)(struct net_device *),
++ unsigned int txqs, unsigned int rxqs)
++{
++ struct net_device *dev;
++ size_t alloc_size;
++ struct net_device *p;
++
++ BUG_ON(strlen(name) >= sizeof(dev->name));
++
++ if (txqs < 1) {
++ pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
++ return NULL;
++ }
++
++#ifdef CONFIG_SYSFS
++ if (rxqs < 1) {
++ pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
++ return NULL;
++ }
++#endif
++
++ alloc_size = sizeof(struct net_device);
++ if (sizeof_priv) {
++ /* ensure 32-byte alignment of private area */
++ alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
++ alloc_size += sizeof_priv;
++ }
++ /* ensure 32-byte alignment of whole construct */
++ alloc_size += NETDEV_ALIGN - 1;
++
++ p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
++ if (!p)
++ p = vzalloc(alloc_size);
++ if (!p)
++ return NULL;
++
++ dev = PTR_ALIGN(p, NETDEV_ALIGN);
++ dev->padded = (char *)dev - (char *)p;
++
++ dev->pcpu_refcnt = alloc_percpu(int);
++ if (!dev->pcpu_refcnt)
++ goto free_dev;
++
++ if (dev_addr_init(dev))
++ goto free_pcpu;
++
++ dev_mc_init(dev);
++ dev_uc_init(dev);
++
++ dev_net_set(dev, &init_net);
++
++ dev->gso_max_size = GSO_MAX_SIZE;
++ dev->gso_max_segs = GSO_MAX_SEGS;
++ dev->gso_min_segs = 0;
++
++ INIT_LIST_HEAD(&dev->napi_list);
++ INIT_LIST_HEAD(&dev->unreg_list);
++ INIT_LIST_HEAD(&dev->close_list);
++ INIT_LIST_HEAD(&dev->link_watch_list);
++ INIT_LIST_HEAD(&dev->adj_list.upper);
++ INIT_LIST_HEAD(&dev->adj_list.lower);
++ INIT_LIST_HEAD(&dev->all_adj_list.upper);
++ INIT_LIST_HEAD(&dev->all_adj_list.lower);
++ INIT_LIST_HEAD(&dev->ptype_all);
++ INIT_LIST_HEAD(&dev->ptype_specific);
++ dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
++ setup(dev);
++
++ dev->num_tx_queues = txqs;
++ dev->real_num_tx_queues = txqs;
++ if (netif_alloc_netdev_queues(dev))
++ goto free_all;
++
++#ifdef CONFIG_SYSFS
++ dev->num_rx_queues = rxqs;
++ dev->real_num_rx_queues = rxqs;
++ if (netif_alloc_rx_queues(dev))
++ goto free_all;
++#endif
++
++ strcpy(dev->name, name);
++ dev->name_assign_type = name_assign_type;
++ dev->group = INIT_NETDEV_GROUP;
++ if (!dev->ethtool_ops)
++ dev->ethtool_ops = &default_ethtool_ops;
++ return dev;
++
++free_all:
++ free_netdev(dev);
++ return NULL;
++
++free_pcpu:
++ free_percpu(dev->pcpu_refcnt);
++free_dev:
++ netdev_freemem(dev);
++ return NULL;
++}
++EXPORT_SYMBOL(alloc_netdev_mqs);
++
++/**
++ * free_netdev - free network device
++ * @dev: device
++ *
++ * This function does the last stage of destroying an allocated device
++ * interface. The reference to the device object is released.
++ * If this is the last reference then it will be freed.
++ */
++void free_netdev(struct net_device *dev)
++{
++ struct napi_struct *p, *n;
++
++ netif_free_tx_queues(dev);
++#ifdef CONFIG_SYSFS
++ kvfree(dev->_rx);
++#endif
++
++ kfree(rcu_dereference_protected(dev->ingress_queue, 1));
++
++ /* Flush device addresses */
++ dev_addr_flush(dev);
++
++ list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
++ netif_napi_del(p);
++
++ free_percpu(dev->pcpu_refcnt);
++ dev->pcpu_refcnt = NULL;
++
++ /* Compatibility with error handling in drivers */
++ if (dev->reg_state == NETREG_UNINITIALIZED) {
++ netdev_freemem(dev);
++ return;
++ }
++
++ BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
++ dev->reg_state = NETREG_RELEASED;
++
++ /* will free via device release */
++ put_device(&dev->dev);
++}
++EXPORT_SYMBOL(free_netdev);
++
++/**
++ * synchronize_net - Synchronize with packet receive processing
++ *
++ * Wait for packets currently being received to be done.
++ * Does not block later packets from starting.
++ */
++void synchronize_net(void)
++{
++ might_sleep();
++ if (rtnl_is_locked())
++ synchronize_rcu_expedited();
++ else
++ synchronize_rcu();
++}
++EXPORT_SYMBOL(synchronize_net);
++
++/**
++ * unregister_netdevice_queue - remove device from the kernel
++ * @dev: device
++ * @head: list
++ *
++ * This function shuts down a device interface and removes it
++ * from the kernel tables.
++ * If head not NULL, device is queued to be unregistered later.
++ *
++ * Callers must hold the rtnl semaphore. You may want
++ * unregister_netdev() instead of this.
++ */
++
++void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
++{
++ ASSERT_RTNL();
++
++ if (head) {
++ list_move_tail(&dev->unreg_list, head);
++ } else {
++ rollback_registered(dev);
++ /* Finish processing unregister after unlock */
++ net_set_todo(dev);
++ }
++}
++EXPORT_SYMBOL(unregister_netdevice_queue);
++
++/**
++ * unregister_netdevice_many - unregister many devices
++ * @head: list of devices
++ *
++ * Note: As most callers use a stack allocated list_head,
++ * we force a list_del() to make sure the stack won't be corrupted later.
++ */
++void unregister_netdevice_many(struct list_head *head)
++{
++ struct net_device *dev;
++
++ if (!list_empty(head)) {
++ rollback_registered_many(head);
++ list_for_each_entry(dev, head, unreg_list)
++ net_set_todo(dev);
++ list_del(head);
++ }
++}
++EXPORT_SYMBOL(unregister_netdevice_many);
++
++/**
++ * unregister_netdev - remove device from the kernel
++ * @dev: device
++ *
++ * This function shuts down a device interface and removes it
++ * from the kernel tables.
++ *
++ * This is just a wrapper for unregister_netdevice that takes
++ * the rtnl semaphore. In general you want to use this and not
++ * unregister_netdevice.
++ */
++void unregister_netdev(struct net_device *dev)
++{
++ rtnl_lock();
++ unregister_netdevice(dev);
++ rtnl_unlock();
++}
++EXPORT_SYMBOL(unregister_netdev);
++
++/**
++ * dev_change_net_namespace - move device to a different network namespace
++ * @dev: device
++ * @net: network namespace
++ * @pat: If not NULL name pattern to try if the current device name
++ * is already taken in the destination network namespace.
++ *
++ * This function shuts down a device interface and moves it
++ * to a new network namespace. On success 0 is returned, on
++ * a failure a negative errno code is returned.
++ *
++ * Callers must hold the rtnl semaphore.
++ */
++
++int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
++{
++ int err;
++
++ ASSERT_RTNL();
++
++ /* Don't allow namespace local devices to be moved. */
++ err = -EINVAL;
++ if (dev->features & NETIF_F_NETNS_LOCAL)
++ goto out;
++
++ /* Ensure the device has been registered */
++ if (dev->reg_state != NETREG_REGISTERED)
++ goto out;
++
++ /* Get out if there is nothing to do */
++ err = 0;
++ if (net_eq(dev_net(dev), net))
++ goto out;
++
++ /* Pick the destination device name, and ensure
++ * we can use it in the destination network namespace.
++ */
++ err = -EEXIST;
++ if (__dev_get_by_name(net, dev->name)) {
++ /* We get here if we can't use the current device name */
++ if (!pat)
++ goto out;
++ if (dev_get_valid_name(net, dev, pat) < 0)
++ goto out;
++ }
++
++ /*
++ * And now a mini version of register_netdevice and unregister_netdevice.
++ */
++
++ /* If device is running close it first. */
++ dev_close(dev);
++
++ /* And unlink it from device chain */
++ err = -ENODEV;
++ unlist_netdevice(dev);
++
++ synchronize_net();
++
++ /* Shutdown queueing discipline. */
++ dev_shutdown(dev);
++
++ /* Notify protocols, that we are about to destroy
++ this device. They should clean all the things.
++
++ Note that dev->reg_state stays at NETREG_REGISTERED.
++ This is wanted because this way 8021q and macvlan know
++ the device is just moving and can keep their slaves up.
++ */
++ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
++ rcu_barrier();
++ call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
++ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
++
++ /*
++ * Flush the unicast and multicast chains
++ */
++ dev_uc_flush(dev);
++ dev_mc_flush(dev);
++
++ /* Send a netdev-removed uevent to the old namespace */
++ kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
++ netdev_adjacent_del_links(dev);
++
++ /* Actually switch the network namespace */
++ dev_net_set(dev, net);
++
++ /* If there is an ifindex conflict assign a new one */
++ if (__dev_get_by_index(net, dev->ifindex))
++ dev->ifindex = dev_new_index(net);
++
++ /* Send a netdev-add uevent to the new namespace */
++ kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
++ netdev_adjacent_add_links(dev);
++
++ /* Fixup kobjects */
++ err = device_rename(&dev->dev, dev->name);
++ WARN_ON(err);
++
++ /* Add the device back in the hashes */
++ list_netdevice(dev);
++
++ /* Notify protocols, that a new device appeared. */
++ call_netdevice_notifiers(NETDEV_REGISTER, dev);
++
++ /*
++ * Prevent userspace races by waiting until the network
++ * device is fully setup before sending notifications.
++ */
++ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
++
++ synchronize_net();
++ err = 0;
++out:
++ return err;
++}
++EXPORT_SYMBOL_GPL(dev_change_net_namespace);
++
++static int dev_cpu_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *ocpu)
++{
++ struct sk_buff **list_skb;
++ struct sk_buff *skb;
++ unsigned int cpu, oldcpu = (unsigned long)ocpu;
++ struct softnet_data *sd, *oldsd;
++
++ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
++ return NOTIFY_OK;
++
++ local_irq_disable();
++ cpu = smp_processor_id();
++ sd = &per_cpu(softnet_data, cpu);
++ oldsd = &per_cpu(softnet_data, oldcpu);
++
++ /* Find end of our completion_queue. */
++ list_skb = &sd->completion_queue;
++ while (*list_skb)
++ list_skb = &(*list_skb)->next;
++ /* Append completion queue from offline CPU. */
++ *list_skb = oldsd->completion_queue;
++ oldsd->completion_queue = NULL;
++
++ /* Append output queue from offline CPU. */
++ if (oldsd->output_queue) {
++ *sd->output_queue_tailp = oldsd->output_queue;
++ sd->output_queue_tailp = oldsd->output_queue_tailp;
++ oldsd->output_queue = NULL;
++ oldsd->output_queue_tailp = &oldsd->output_queue;
++ }
++ /* Append NAPI poll list from offline CPU, with one exception :
++ * process_backlog() must be called by cpu owning percpu backlog.
++ * We properly handle process_queue & input_pkt_queue later.
++ */
++ while (!list_empty(&oldsd->poll_list)) {
++ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
++ struct napi_struct,
++ poll_list);
++
++ list_del_init(&napi->poll_list);
++ if (napi->poll == process_backlog)
++ napi->state = 0;
++ else
++ ____napi_schedule(sd, napi);
++ }
++
++ raise_softirq_irqoff(NET_TX_SOFTIRQ);
++ local_irq_enable();
++
++ /* Process offline CPU's input_pkt_queue */
++ while ((skb = __skb_dequeue(&oldsd->process_queue))) {
++ netif_rx_ni(skb);
++ input_queue_head_incr(oldsd);
++ }
++ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
++ netif_rx_ni(skb);
++ input_queue_head_incr(oldsd);
++ }
++
++ return NOTIFY_OK;
++}
++
++
++/**
++ * netdev_increment_features - increment feature set by one
++ * @all: current feature set
++ * @one: new feature set
++ * @mask: mask feature set
++ *
++ * Computes a new feature set after adding a device with feature set
++ * @one to the master device with current feature set @all. Will not
++ * enable anything that is off in @mask. Returns the new feature set.
++ */
++netdev_features_t netdev_increment_features(netdev_features_t all,
++ netdev_features_t one, netdev_features_t mask)
++{
++ if (mask & NETIF_F_GEN_CSUM)
++ mask |= NETIF_F_ALL_CSUM;
++ mask |= NETIF_F_VLAN_CHALLENGED;
++
++ all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
++ all &= one | ~NETIF_F_ALL_FOR_ALL;
++
++ /* If one device supports hw checksumming, set for all. */
++ if (all & NETIF_F_GEN_CSUM)
++ all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
++
++ return all;
++}
++EXPORT_SYMBOL(netdev_increment_features);
++
++static struct hlist_head * __net_init netdev_create_hash(void)
++{
++ int i;
++ struct hlist_head *hash;
++
++ hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
++ if (hash != NULL)
++ for (i = 0; i < NETDEV_HASHENTRIES; i++)
++ INIT_HLIST_HEAD(&hash[i]);
++
++ return hash;
++}
++
++/* Initialize per network namespace state */
++static int __net_init netdev_init(struct net *net)
++{
++ if (net != &init_net)
++ INIT_LIST_HEAD(&net->dev_base_head);
++
++ net->dev_name_head = netdev_create_hash();
++ if (net->dev_name_head == NULL)
++ goto err_name;
++
++ net->dev_index_head = netdev_create_hash();
++ if (net->dev_index_head == NULL)
++ goto err_idx;
++
++ return 0;
++
++err_idx:
++ kfree(net->dev_name_head);
++err_name:
++ return -ENOMEM;
++}
++
++/**
++ * netdev_drivername - network driver for the device
++ * @dev: network device
++ *
++ * Determine network driver for device.
++ */
++const char *netdev_drivername(const struct net_device *dev)
++{
++ const struct device_driver *driver;
++ const struct device *parent;
++ const char *empty = "";
++
++ parent = dev->dev.parent;
++ if (!parent)
++ return empty;
++
++ driver = parent->driver;
++ if (driver && driver->name)
++ return driver->name;
++ return empty;
++}
++
++static void __netdev_printk(const char *level, const struct net_device *dev,
++ struct va_format *vaf)
++{
++ if (dev && dev->dev.parent) {
++ dev_printk_emit(level[1] - '0',
++ dev->dev.parent,
++ "%s %s %s%s: %pV",
++ dev_driver_string(dev->dev.parent),
++ dev_name(dev->dev.parent),
++ netdev_name(dev), netdev_reg_state(dev),
++ vaf);
++ } else if (dev) {
++ printk("%s%s%s: %pV",
++ level, netdev_name(dev), netdev_reg_state(dev), vaf);
++ } else {
++ printk("%s(NULL net_device): %pV", level, vaf);
++ }
++}
++
++void netdev_printk(const char *level, const struct net_device *dev,
++ const char *format, ...)
++{
++ struct va_format vaf;
++ va_list args;
++
++ va_start(args, format);
++
++ vaf.fmt = format;
++ vaf.va = &args;
++
++ __netdev_printk(level, dev, &vaf);
++
++ va_end(args);
++}
++EXPORT_SYMBOL(netdev_printk);
++
++#define define_netdev_printk_level(func, level) \
++void func(const struct net_device *dev, const char *fmt, ...) \
++{ \
++ struct va_format vaf; \
++ va_list args; \
++ \
++ va_start(args, fmt); \
++ \
++ vaf.fmt = fmt; \
++ vaf.va = &args; \
++ \
++ __netdev_printk(level, dev, &vaf); \
++ \
++ va_end(args); \
++} \
++EXPORT_SYMBOL(func);
++
++define_netdev_printk_level(netdev_emerg, KERN_EMERG);
++define_netdev_printk_level(netdev_alert, KERN_ALERT);
++define_netdev_printk_level(netdev_crit, KERN_CRIT);
++define_netdev_printk_level(netdev_err, KERN_ERR);
++define_netdev_printk_level(netdev_warn, KERN_WARNING);
++define_netdev_printk_level(netdev_notice, KERN_NOTICE);
++define_netdev_printk_level(netdev_info, KERN_INFO);
++
++static void __net_exit netdev_exit(struct net *net)
++{
++ kfree(net->dev_name_head);
++ kfree(net->dev_index_head);
++}
++
++static struct pernet_operations __net_initdata netdev_net_ops = {
++ .init = netdev_init,
++ .exit = netdev_exit,
++};
++
++static void __net_exit default_device_exit(struct net *net)
++{
++ struct net_device *dev, *aux;
++ /*
++ * Push all migratable network devices back to the
++ * initial network namespace
++ */
++ rtnl_lock();
++ for_each_netdev_safe(net, dev, aux) {
++ int err;
++ char fb_name[IFNAMSIZ];
++
++ /* Ignore unmoveable devices (i.e. loopback) */
++ if (dev->features & NETIF_F_NETNS_LOCAL)
++ continue;
++
++ /* Leave virtual devices for the generic cleanup */
++ if (dev->rtnl_link_ops)
++ continue;
++
++ /* Push remaining network devices to init_net */
++ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
++ err = dev_change_net_namespace(dev, &init_net, fb_name);
++ if (err) {
++ pr_emerg("%s: failed to move %s to init_net: %d\n",
++ __func__, dev->name, err);
++ BUG();
++ }
++ }
++ rtnl_unlock();
++}
++
++static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
++{
++ /* Return with the rtnl_lock held when there are no network
++ * devices unregistering in any network namespace in net_list.
++ */
++ struct net *net;
++ bool unregistering;
++ DEFINE_WAIT_FUNC(wait, woken_wake_function);
++
++ add_wait_queue(&netdev_unregistering_wq, &wait);
++ for (;;) {
++ unregistering = false;
++ rtnl_lock();
++ list_for_each_entry(net, net_list, exit_list) {
++ if (net->dev_unreg_count > 0) {
++ unregistering = true;
++ break;
++ }
++ }
++ if (!unregistering)
++ break;
++ __rtnl_unlock();
++
++ wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
++ }
++ remove_wait_queue(&netdev_unregistering_wq, &wait);
++}
++
++static void __net_exit default_device_exit_batch(struct list_head *net_list)
++{
++ /* At exit all network devices must be removed from a network
++ * namespace. Do this in the reverse order of registration.
++ * Do this across as many network namespaces as possible to
++ * improve batching efficiency.
++ */
++ struct net_device *dev;
++ struct net *net;
++ LIST_HEAD(dev_kill_list);
++
++ /* To prevent network device cleanup code from dereferencing
++ * loopback devices or network devices that have been freed
++ * wait here for all pending unregistrations to complete,
++ * before unregistering the loopback device and allowing the
++ * network namespace to be freed.
++ *
++ * The netdev todo list containing all network devices
++ * unregistrations that happen in default_device_exit_batch
++ * will run in the rtnl_unlock() at the end of
++ * default_device_exit_batch.
++ */
++ rtnl_lock_unregistering(net_list);
++ list_for_each_entry(net, net_list, exit_list) {
++ for_each_netdev_reverse(net, dev) {
++ if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
++ dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
++ else
++ unregister_netdevice_queue(dev, &dev_kill_list);
++ }
++ }
++ unregister_netdevice_many(&dev_kill_list);
++ rtnl_unlock();
++}
++
++static struct pernet_operations __net_initdata default_device_ops = {
++ .exit = default_device_exit,
++ .exit_batch = default_device_exit_batch,
++};
++
++/*
++ * Initialize the DEV module. At boot time this walks the device list and
++ * unhooks any devices that fail to initialise (normally hardware not
++ * present) and leaves us with a valid list of present and active devices.
++ *
++ */
++
++/*
++ * This is called single threaded during boot, so no need
++ * to take the rtnl semaphore.
++ */
++static int __init net_dev_init(void)
++{
++ int i, rc = -ENOMEM;
++
++ BUG_ON(!dev_boot_phase);
++
++ if (dev_proc_init())
++ goto out;
++
++ if (netdev_kobject_init())
++ goto out;
++
++ INIT_LIST_HEAD(&ptype_all);
++ for (i = 0; i < PTYPE_HASH_SIZE; i++)
++ INIT_LIST_HEAD(&ptype_base[i]);
++
++ INIT_LIST_HEAD(&offload_base);
++
++ if (register_pernet_subsys(&netdev_net_ops))
++ goto out;
++
++ /*
++ * Initialise the packet receive queues.
++ */
++
++ for_each_possible_cpu(i) {
++ struct softnet_data *sd = &per_cpu(softnet_data, i);
++
++ skb_queue_head_init(&sd->input_pkt_queue);
++ skb_queue_head_init(&sd->process_queue);
++ INIT_LIST_HEAD(&sd->poll_list);
++ sd->output_queue_tailp = &sd->output_queue;
++#ifdef CONFIG_RPS
++ sd->csd.func = rps_trigger_softirq;
++ sd->csd.info = sd;
++ sd->cpu = i;
++#endif
++
++ sd->backlog.poll = process_backlog;
++ sd->backlog.weight = weight_p;
++ }
++
++ dev_boot_phase = 0;
++
++ /* The loopback device is special: if any other network device
++ * is present in a network namespace, the loopback device must
++ * be present too. Since we now dynamically allocate and free the
++ * loopback device, ensure this invariant is maintained by
++ * keeping the loopback device as the first device on the
++ * list of network devices. This ensures the loopback device
++ * is the first device that appears and the last network device
++ * that disappears.
++ */
++ if (register_pernet_device(&loopback_net_ops))
++ goto out;
++
++ if (register_pernet_device(&default_device_ops))
++ goto out;
++
++ open_softirq(NET_TX_SOFTIRQ, net_tx_action);
++ open_softirq(NET_RX_SOFTIRQ, net_rx_action);
++
++ hotcpu_notifier(dev_cpu_callback, 0);
++ dst_init();
++ rc = 0;
++out:
++ return rc;
++}
++
++subsys_initcall(net_dev_init);
+diff -Nur linux-4.1.10.orig/net/core/skbuff.c linux-4.1.10/net/core/skbuff.c
+--- linux-4.1.10.orig/net/core/skbuff.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/core/skbuff.c 2015-10-07 18:00:08.000000000 +0200
@@ -63,6 +63,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
@@ -26299,9 +70088,9 @@ diff -Nur linux-4.1.6.orig/net/core/skbuff.c linux-4.1.6/net/core/skbuff.c
return data;
}
-diff -Nur linux-4.1.6.orig/net/core/sock.c linux-4.1.6/net/core/sock.c
---- linux-4.1.6.orig/net/core/sock.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/core/sock.c 2015-09-08 23:49:08.445829687 +0200
+diff -Nur linux-4.1.10.orig/net/core/sock.c linux-4.1.10/net/core/sock.c
+--- linux-4.1.10.orig/net/core/sock.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/core/sock.c 2015-10-07 18:00:08.000000000 +0200
@@ -2370,12 +2370,11 @@
if (sk->sk_lock.owned)
__lock_sock(sk);
@@ -26316,9 +70105,9 @@ diff -Nur linux-4.1.6.orig/net/core/sock.c linux-4.1.6/net/core/sock.c
}
EXPORT_SYMBOL(lock_sock_nested);
-diff -Nur linux-4.1.6.orig/net/ipv4/icmp.c linux-4.1.6/net/ipv4/icmp.c
---- linux-4.1.6.orig/net/ipv4/icmp.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/ipv4/icmp.c 2015-09-08 23:49:08.461827913 +0200
+diff -Nur linux-4.1.10.orig/net/ipv4/icmp.c linux-4.1.10/net/ipv4/icmp.c
+--- linux-4.1.10.orig/net/ipv4/icmp.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/ipv4/icmp.c 2015-10-07 18:00:08.000000000 +0200
@@ -69,6 +69,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -26370,9 +70159,9 @@ diff -Nur linux-4.1.6.orig/net/ipv4/icmp.c linux-4.1.6/net/ipv4/icmp.c
}
/* should there be an ICMP stat for ignored echos? */
return true;
-diff -Nur linux-4.1.6.orig/net/ipv4/sysctl_net_ipv4.c linux-4.1.6/net/ipv4/sysctl_net_ipv4.c
---- linux-4.1.6.orig/net/ipv4/sysctl_net_ipv4.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/ipv4/sysctl_net_ipv4.c 2015-09-08 23:49:08.461827913 +0200
+diff -Nur linux-4.1.10.orig/net/ipv4/sysctl_net_ipv4.c linux-4.1.10/net/ipv4/sysctl_net_ipv4.c
+--- linux-4.1.10.orig/net/ipv4/sysctl_net_ipv4.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/ipv4/sysctl_net_ipv4.c 2015-10-07 18:00:08.000000000 +0200
@@ -779,6 +779,13 @@
.proc_handler = proc_dointvec
},
@@ -26387,9 +70176,9 @@ diff -Nur linux-4.1.6.orig/net/ipv4/sysctl_net_ipv4.c linux-4.1.6/net/ipv4/sysct
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
-diff -Nur linux-4.1.6.orig/net/mac80211/rx.c linux-4.1.6/net/mac80211/rx.c
---- linux-4.1.6.orig/net/mac80211/rx.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/mac80211/rx.c 2015-09-08 23:49:08.461827913 +0200
+diff -Nur linux-4.1.10.orig/net/mac80211/rx.c linux-4.1.10/net/mac80211/rx.c
+--- linux-4.1.10.orig/net/mac80211/rx.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/mac80211/rx.c 2015-10-07 18:00:08.000000000 +0200
@@ -3554,7 +3554,7 @@
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -26399,9 +70188,9 @@ diff -Nur linux-4.1.6.orig/net/mac80211/rx.c linux-4.1.6/net/mac80211/rx.c
if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
goto drop;
-diff -Nur linux-4.1.6.orig/net/netfilter/core.c linux-4.1.6/net/netfilter/core.c
---- linux-4.1.6.orig/net/netfilter/core.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/netfilter/core.c 2015-09-08 23:49:08.461827913 +0200
+diff -Nur linux-4.1.10.orig/net/netfilter/core.c linux-4.1.10/net/netfilter/core.c
+--- linux-4.1.10.orig/net/netfilter/core.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/netfilter/core.c 2015-10-07 18:00:08.000000000 +0200
@@ -22,11 +22,17 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
@@ -26420,9 +70209,9 @@ diff -Nur linux-4.1.6.orig/net/netfilter/core.c linux-4.1.6/net/netfilter/core.c
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-diff -Nur linux-4.1.6.orig/net/packet/af_packet.c linux-4.1.6/net/packet/af_packet.c
---- linux-4.1.6.orig/net/packet/af_packet.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/packet/af_packet.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/net/packet/af_packet.c linux-4.1.10/net/packet/af_packet.c
+--- linux-4.1.10.orig/net/packet/af_packet.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/packet/af_packet.c 2015-10-07 18:00:08.000000000 +0200
@@ -63,6 +63,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
@@ -26449,9 +70238,9 @@ diff -Nur linux-4.1.6.orig/net/packet/af_packet.c linux-4.1.6/net/packet/af_pack
}
}
prb_close_block(pkc, pbd, po, status);
-diff -Nur linux-4.1.6.orig/net/rds/ib_rdma.c linux-4.1.6/net/rds/ib_rdma.c
---- linux-4.1.6.orig/net/rds/ib_rdma.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/rds/ib_rdma.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/net/rds/ib_rdma.c linux-4.1.10/net/rds/ib_rdma.c
+--- linux-4.1.10.orig/net/rds/ib_rdma.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/rds/ib_rdma.c 2015-10-07 18:00:08.000000000 +0200
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
@@ -26469,9 +70258,9 @@ diff -Nur linux-4.1.6.orig/net/rds/ib_rdma.c linux-4.1.6/net/rds/ib_rdma.c
}
}
-diff -Nur linux-4.1.6.orig/net/sched/sch_generic.c linux-4.1.6/net/sched/sch_generic.c
---- linux-4.1.6.orig/net/sched/sch_generic.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/sched/sch_generic.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/net/sched/sch_generic.c linux-4.1.10/net/sched/sch_generic.c
+--- linux-4.1.10.orig/net/sched/sch_generic.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/sched/sch_generic.c 2015-10-07 18:00:08.000000000 +0200
@@ -894,7 +894,7 @@
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
@@ -26481,9 +70270,9 @@ diff -Nur linux-4.1.6.orig/net/sched/sch_generic.c linux-4.1.6/net/sched/sch_gen
}
void dev_deactivate(struct net_device *dev)
-diff -Nur linux-4.1.6.orig/net/sunrpc/svc_xprt.c linux-4.1.6/net/sunrpc/svc_xprt.c
---- linux-4.1.6.orig/net/sunrpc/svc_xprt.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/net/sunrpc/svc_xprt.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/net/sunrpc/svc_xprt.c linux-4.1.10/net/sunrpc/svc_xprt.c
+--- linux-4.1.10.orig/net/sunrpc/svc_xprt.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/net/sunrpc/svc_xprt.c 2015-10-07 18:00:08.000000000 +0200
@@ -341,7 +341,7 @@
goto out;
}
@@ -26511,9 +70300,9 @@ diff -Nur linux-4.1.6.orig/net/sunrpc/svc_xprt.c linux-4.1.6/net/sunrpc/svc_xprt
out:
trace_svc_xprt_do_enqueue(xprt, rqstp);
}
-diff -Nur linux-4.1.6.orig/scripts/mkcompile_h linux-4.1.6/scripts/mkcompile_h
---- linux-4.1.6.orig/scripts/mkcompile_h 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/scripts/mkcompile_h 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/scripts/mkcompile_h linux-4.1.10/scripts/mkcompile_h
+--- linux-4.1.10.orig/scripts/mkcompile_h 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/scripts/mkcompile_h 2015-10-07 18:00:08.000000000 +0200
@@ -4,7 +4,8 @@
ARCH=$2
SMP=$3
@@ -26532,9 +70321,9 @@ diff -Nur linux-4.1.6.orig/scripts/mkcompile_h linux-4.1.6/scripts/mkcompile_h
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length
-diff -Nur linux-4.1.6.orig/sound/core/pcm_native.c linux-4.1.6/sound/core/pcm_native.c
---- linux-4.1.6.orig/sound/core/pcm_native.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/sound/core/pcm_native.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/sound/core/pcm_native.c linux-4.1.10/sound/core/pcm_native.c
+--- linux-4.1.10.orig/sound/core/pcm_native.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/sound/core/pcm_native.c 2015-10-07 18:00:08.000000000 +0200
@@ -123,7 +123,7 @@
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
@@ -26571,9 +70360,9 @@ diff -Nur linux-4.1.6.orig/sound/core/pcm_native.c linux-4.1.6/sound/core/pcm_na
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
-diff -Nur linux-4.1.6.orig/sound/soc/intel/atom/sst/sst.c linux-4.1.6/sound/soc/intel/atom/sst/sst.c
---- linux-4.1.6.orig/sound/soc/intel/atom/sst/sst.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/sound/soc/intel/atom/sst/sst.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/sound/soc/intel/atom/sst/sst.c linux-4.1.10/sound/soc/intel/atom/sst/sst.c
+--- linux-4.1.10.orig/sound/soc/intel/atom/sst/sst.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/sound/soc/intel/atom/sst/sst.c 2015-10-07 18:00:08.000000000 +0200
@@ -368,8 +368,8 @@
* initialize by FW or driver when firmware is loaded
*/
@@ -26585,9 +70374,9 @@ diff -Nur linux-4.1.6.orig/sound/soc/intel/atom/sst/sst.c linux-4.1.6/sound/soc/
spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
}
-diff -Nur linux-4.1.6.orig/virt/kvm/async_pf.c linux-4.1.6/virt/kvm/async_pf.c
---- linux-4.1.6.orig/virt/kvm/async_pf.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/virt/kvm/async_pf.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/virt/kvm/async_pf.c linux-4.1.10/virt/kvm/async_pf.c
+--- linux-4.1.10.orig/virt/kvm/async_pf.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/virt/kvm/async_pf.c 2015-10-07 18:00:08.000000000 +0200
@@ -94,8 +94,8 @@
trace_kvm_async_pf_completed(addr, gva);
@@ -26599,9 +70388,9 @@ diff -Nur linux-4.1.6.orig/virt/kvm/async_pf.c linux-4.1.6/virt/kvm/async_pf.c
mmput(mm);
kvm_put_kvm(vcpu->kvm);
-diff -Nur linux-4.1.6.orig/virt/kvm/kvm_main.c linux-4.1.6/virt/kvm/kvm_main.c
---- linux-4.1.6.orig/virt/kvm/kvm_main.c 2015-08-17 05:52:51.000000000 +0200
-+++ linux-4.1.6/virt/kvm/kvm_main.c 2015-09-08 23:49:08.465827470 +0200
+diff -Nur linux-4.1.10.orig/virt/kvm/kvm_main.c linux-4.1.10/virt/kvm/kvm_main.c
+--- linux-4.1.10.orig/virt/kvm/kvm_main.c 2015-10-03 13:49:38.000000000 +0200
++++ linux-4.1.10/virt/kvm/kvm_main.c 2015-10-07 18:00:08.000000000 +0200
@@ -218,7 +218,7 @@
vcpu->kvm = kvm;
vcpu->vcpu_id = id;