author    Waldemar Brodkorb <wbx@openadk.org>    2015-03-15 16:21:24 -0500
committer Waldemar Brodkorb <wbx@openadk.org>    2015-03-15 16:58:33 -0500
commit    6dad6b4c802f6b1082dc7b7ff6dac0ac4f80bb40 (patch)
tree      4cc8e48b7af71f321994d540c4efc2ee20c0c788 /target/linux/patches/3.18.9
parent    d6a728822ca8b1e3cd3ab741e8cc11d15efc6041 (diff)
convert addon patch to default patch
Diffstat (limited to 'target/linux/patches/3.18.9')
-rw-r--r--  target/linux/patches/3.18.9/realtime.patch (renamed from target/linux/patches/3.18.9/patch-linuxrt) | 17140
1 file changed, 16058 insertions(+), 1082 deletions(-)
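
The diff headers below show the renamed patch was regenerated with "diff -Nur" against a pristine 3.18.9 tree. A minimal sketch of how such a refresh might be produced (a hypothetical workflow, not taken from this commit; the tarball name and the step of reusing the old patch-linuxrt are assumptions):

  # unpack a pristine tree and keep an untouched .orig copy to diff against
  tar xf linux-3.18.9.tar.xz
  cp -a linux-3.18.9 linux-3.18.9.orig
  # apply the previous RT patch, letting patch(1) resolve line offsets
  cd linux-3.18.9 && patch -p1 < ../patch-linuxrt && cd ..
  # regenerate with the same flags the patch headers use
  diff -Nur linux-3.18.9.orig linux-3.18.9 > realtime.patch
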
diff --git a/target/linux/patches/3.18.9/patch-linuxrt b/target/linux/patches/3.18.9/realtime.patch
index 444acdc6c..3d8984076 100644
--- a/target/linux/patches/3.18.9/patch-linuxrt
+++ b/target/linux/patches/3.18.9/realtime.patch
@@ -1,6 +1,6 @@
-diff -Nur linux-3.18.8.orig/arch/alpha/mm/fault.c linux-3.18.8/arch/alpha/mm/fault.c
---- linux-3.18.8.orig/arch/alpha/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/alpha/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/alpha/mm/fault.c linux-3.18.9/arch/alpha/mm/fault.c
+--- linux-3.18.9.orig/arch/alpha/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/alpha/mm/fault.c 2015-03-15 16:03:03.672094877 -0500
@@ -107,7 +107,7 @@
/* If we're in an interrupt context, or have no user context,
@@ -10,9 +10,9 @@ diff -Nur linux-3.18.8.orig/arch/alpha/mm/fault.c linux-3.18.8/arch/alpha/mm/fau
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
-diff -Nur linux-3.18.8.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.8/arch/arm/include/asm/cmpxchg.h
---- linux-3.18.8.orig/arch/arm/include/asm/cmpxchg.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/include/asm/cmpxchg.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.9/arch/arm/include/asm/cmpxchg.h
+--- linux-3.18.9.orig/arch/arm/include/asm/cmpxchg.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/include/asm/cmpxchg.h 2015-03-15 16:03:03.672094877 -0500
@@ -129,6 +129,8 @@
#else /* min ARCH >= ARMv6 */
@@ -22,9 +22,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.8/arch/arm
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
-diff -Nur linux-3.18.8.orig/arch/arm/include/asm/futex.h linux-3.18.8/arch/arm/include/asm/futex.h
---- linux-3.18.8.orig/arch/arm/include/asm/futex.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/include/asm/futex.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/include/asm/futex.h linux-3.18.9/arch/arm/include/asm/futex.h
+--- linux-3.18.9.orig/arch/arm/include/asm/futex.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/include/asm/futex.h 2015-03-15 16:03:03.672094877 -0500
@@ -93,6 +93,8 @@
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -43,9 +43,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/include/asm/futex.h linux-3.18.8/arch/arm/i
return ret;
}
-diff -Nur linux-3.18.8.orig/arch/arm/include/asm/switch_to.h linux-3.18.8/arch/arm/include/asm/switch_to.h
---- linux-3.18.8.orig/arch/arm/include/asm/switch_to.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/include/asm/switch_to.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/include/asm/switch_to.h linux-3.18.9/arch/arm/include/asm/switch_to.h
+--- linux-3.18.9.orig/arch/arm/include/asm/switch_to.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/include/asm/switch_to.h 2015-03-15 16:03:03.672094877 -0500
@@ -3,6 +3,13 @@
#include <linux/thread_info.h>
@@ -68,9 +68,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/include/asm/switch_to.h linux-3.18.8/arch/a
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
-diff -Nur linux-3.18.8.orig/arch/arm/include/asm/thread_info.h linux-3.18.8/arch/arm/include/asm/thread_info.h
---- linux-3.18.8.orig/arch/arm/include/asm/thread_info.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/include/asm/thread_info.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/include/asm/thread_info.h linux-3.18.9/arch/arm/include/asm/thread_info.h
+--- linux-3.18.9.orig/arch/arm/include/asm/thread_info.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/include/asm/thread_info.h 2015-03-15 16:03:03.672094877 -0500
@@ -51,6 +51,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
@@ -95,9 +95,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/include/asm/thread_info.h linux-3.18.8/arch
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-3.18.8.orig/arch/arm/Kconfig linux-3.18.8/arch/arm/Kconfig
---- linux-3.18.8.orig/arch/arm/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/Kconfig 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/Kconfig linux-3.18.9/arch/arm/Kconfig
+--- linux-3.18.9.orig/arch/arm/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/Kconfig 2015-03-15 16:03:03.672094877 -0500
@@ -62,6 +62,7 @@
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -106,9 +106,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/Kconfig linux-3.18.8/arch/arm/Kconfig
select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
-diff -Nur linux-3.18.8.orig/arch/arm/kernel/asm-offsets.c linux-3.18.8/arch/arm/kernel/asm-offsets.c
---- linux-3.18.8.orig/arch/arm/kernel/asm-offsets.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/kernel/asm-offsets.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/kernel/asm-offsets.c linux-3.18.9/arch/arm/kernel/asm-offsets.c
+--- linux-3.18.9.orig/arch/arm/kernel/asm-offsets.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/kernel/asm-offsets.c 2015-03-15 16:03:03.672094877 -0500
@@ -64,6 +64,7 @@
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -117,9 +117,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/kernel/asm-offsets.c linux-3.18.8/arch/arm/
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
-diff -Nur linux-3.18.8.orig/arch/arm/kernel/entry-armv.S linux-3.18.8/arch/arm/kernel/entry-armv.S
---- linux-3.18.8.orig/arch/arm/kernel/entry-armv.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/kernel/entry-armv.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/kernel/entry-armv.S linux-3.18.9/arch/arm/kernel/entry-armv.S
+--- linux-3.18.9.orig/arch/arm/kernel/entry-armv.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/kernel/entry-armv.S 2015-03-15 16:03:03.672094877 -0500
@@ -207,11 +207,18 @@
#ifdef CONFIG_PREEMPT
get_thread_info tsk
@@ -150,9 +150,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/kernel/entry-armv.S linux-3.18.8/arch/arm/k
reteq r8 @ go again
b 1b
#endif
-diff -Nur linux-3.18.8.orig/arch/arm/kernel/process.c linux-3.18.8/arch/arm/kernel/process.c
---- linux-3.18.8.orig/arch/arm/kernel/process.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/kernel/process.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/kernel/process.c linux-3.18.9/arch/arm/kernel/process.c
+--- linux-3.18.9.orig/arch/arm/kernel/process.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/kernel/process.c 2015-03-15 16:03:03.672094877 -0500
@@ -431,6 +431,30 @@
}
@@ -184,9 +184,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/kernel/process.c linux-3.18.8/arch/arm/kern
#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
-diff -Nur linux-3.18.8.orig/arch/arm/kernel/signal.c linux-3.18.8/arch/arm/kernel/signal.c
---- linux-3.18.8.orig/arch/arm/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/kernel/signal.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/kernel/signal.c linux-3.18.9/arch/arm/kernel/signal.c
+--- linux-3.18.9.orig/arch/arm/kernel/signal.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/kernel/signal.c 2015-03-15 16:03:03.672094877 -0500
@@ -574,7 +574,8 @@
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
@@ -197,9 +197,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/kernel/signal.c linux-3.18.8/arch/arm/kerne
schedule();
} else {
if (unlikely(!user_mode(regs)))
-diff -Nur linux-3.18.8.orig/arch/arm/kernel/smp.c linux-3.18.8/arch/arm/kernel/smp.c
---- linux-3.18.8.orig/arch/arm/kernel/smp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/kernel/smp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/kernel/smp.c linux-3.18.9/arch/arm/kernel/smp.c
+--- linux-3.18.9.orig/arch/arm/kernel/smp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/kernel/smp.c 2015-03-15 16:03:03.672094877 -0500
@@ -506,12 +506,14 @@
}
@@ -215,9 +215,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/kernel/smp.c linux-3.18.8/arch/arm/kernel/s
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
-diff -Nur linux-3.18.8.orig/arch/arm/kernel/unwind.c linux-3.18.8/arch/arm/kernel/unwind.c
---- linux-3.18.8.orig/arch/arm/kernel/unwind.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/kernel/unwind.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/kernel/unwind.c linux-3.18.9/arch/arm/kernel/unwind.c
+--- linux-3.18.9.orig/arch/arm/kernel/unwind.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/kernel/unwind.c 2015-03-15 16:03:03.672094877 -0500
@@ -93,7 +93,7 @@
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
@@ -269,9 +269,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/kernel/unwind.c linux-3.18.8/arch/arm/kerne
kfree(tab);
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.8/arch/arm/mach-at91/at91rm9200_time.c
---- linux-3.18.8.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-at91/at91rm9200_time.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.9/arch/arm/mach-at91/at91rm9200_time.c
+--- linux-3.18.9.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-at91/at91rm9200_time.c 2015-03-15 16:03:03.672094877 -0500
@@ -135,6 +135,7 @@
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -280,9 +280,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.8/ar
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
-diff -Nur linux-3.18.8.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.8/arch/arm/mach-exynos/platsmp.c
---- linux-3.18.8.orig/arch/arm/mach-exynos/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-exynos/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.9/arch/arm/mach-exynos/platsmp.c
+--- linux-3.18.9.orig/arch/arm/mach-exynos/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-exynos/platsmp.c 2015-03-15 16:03:03.672094877 -0500
@@ -137,7 +137,7 @@
return (void __iomem *)(S5P_VA_SCU);
}
@@ -330,9 +330,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.8/arch/arm
return pen_release != -1 ? ret : 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.8/arch/arm/mach-hisi/platmcpm.c
---- linux-3.18.8.orig/arch/arm/mach-hisi/platmcpm.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-hisi/platmcpm.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.9/arch/arm/mach-hisi/platmcpm.c
+--- linux-3.18.9.orig/arch/arm/mach-hisi/platmcpm.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-hisi/platmcpm.c 2015-03-15 16:03:03.672094877 -0500
@@ -57,7 +57,7 @@
static void __iomem *sysctrl, *fabric;
@@ -435,9 +435,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.8/arch/arm/
}
static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
-diff -Nur linux-3.18.8.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.8/arch/arm/mach-omap2/omap-smp.c
---- linux-3.18.8.orig/arch/arm/mach-omap2/omap-smp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-omap2/omap-smp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.9/arch/arm/mach-omap2/omap-smp.c
+--- linux-3.18.9.orig/arch/arm/mach-omap2/omap-smp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-omap2/omap-smp.c 2015-03-15 16:03:03.672094877 -0500
@@ -43,7 +43,7 @@
/* SCU base address */
static void __iomem *scu_base;
@@ -476,9 +476,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.8/arch/arm
return 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.8/arch/arm/mach-prima2/platsmp.c
---- linux-3.18.8.orig/arch/arm/mach-prima2/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-prima2/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.9/arch/arm/mach-prima2/platsmp.c
+--- linux-3.18.9.orig/arch/arm/mach-prima2/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-prima2/platsmp.c 2015-03-15 16:03:03.676094877 -0500
@@ -23,7 +23,7 @@
static void __iomem *scu_base;
static void __iomem *rsc_base;
@@ -517,9 +517,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.8/arch/arm
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.8/arch/arm/mach-qcom/platsmp.c
---- linux-3.18.8.orig/arch/arm/mach-qcom/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-qcom/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.9/arch/arm/mach-qcom/platsmp.c
+--- linux-3.18.9.orig/arch/arm/mach-qcom/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-qcom/platsmp.c 2015-03-15 16:03:03.676094877 -0500
@@ -46,7 +46,7 @@
extern void secondary_startup(void);
@@ -558,9 +558,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.8/arch/arm/m
return ret;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-spear/platsmp.c linux-3.18.8/arch/arm/mach-spear/platsmp.c
---- linux-3.18.8.orig/arch/arm/mach-spear/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-spear/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-spear/platsmp.c linux-3.18.9/arch/arm/mach-spear/platsmp.c
+--- linux-3.18.9.orig/arch/arm/mach-spear/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-spear/platsmp.c 2015-03-15 16:03:03.676094877 -0500
@@ -32,7 +32,7 @@
sync_cache_w(&pen_release);
}
@@ -599,9 +599,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-spear/platsmp.c linux-3.18.8/arch/arm/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-sti/platsmp.c linux-3.18.8/arch/arm/mach-sti/platsmp.c
---- linux-3.18.8.orig/arch/arm/mach-sti/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-sti/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-sti/platsmp.c linux-3.18.9/arch/arm/mach-sti/platsmp.c
+--- linux-3.18.9.orig/arch/arm/mach-sti/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-sti/platsmp.c 2015-03-15 16:03:03.676094877 -0500
@@ -34,7 +34,7 @@
sync_cache_w(&pen_release);
}
@@ -640,9 +640,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-sti/platsmp.c linux-3.18.8/arch/arm/ma
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.8/arch/arm/mach-ux500/platsmp.c
---- linux-3.18.8.orig/arch/arm/mach-ux500/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mach-ux500/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.9/arch/arm/mach-ux500/platsmp.c
+--- linux-3.18.9.orig/arch/arm/mach-ux500/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mach-ux500/platsmp.c 2015-03-15 16:03:03.676094877 -0500
@@ -51,7 +51,7 @@
return NULL;
}
@@ -681,9 +681,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.8/arch/arm/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mm/fault.c linux-3.18.8/arch/arm/mm/fault.c
---- linux-3.18.8.orig/arch/arm/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mm/fault.c linux-3.18.9/arch/arm/mm/fault.c
+--- linux-3.18.9.orig/arch/arm/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -277,7 +277,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -713,9 +713,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mm/fault.c linux-3.18.8/arch/arm/mm/fault.c
do_bad_area(addr, fsr, regs);
return 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm/mm/highmem.c linux-3.18.8/arch/arm/mm/highmem.c
---- linux-3.18.8.orig/arch/arm/mm/highmem.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/mm/highmem.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/mm/highmem.c linux-3.18.9/arch/arm/mm/highmem.c
+--- linux-3.18.9.orig/arch/arm/mm/highmem.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/mm/highmem.c 2015-03-15 16:03:03.676094877 -0500
@@ -53,6 +53,7 @@
void *kmap_atomic(struct page *page)
@@ -802,9 +802,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/mm/highmem.c linux-3.18.8/arch/arm/mm/highm
+ }
+}
+#endif
-diff -Nur linux-3.18.8.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.8/arch/arm/plat-versatile/platsmp.c
---- linux-3.18.8.orig/arch/arm/plat-versatile/platsmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm/plat-versatile/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.9/arch/arm/plat-versatile/platsmp.c
+--- linux-3.18.9.orig/arch/arm/plat-versatile/platsmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm/plat-versatile/platsmp.c 2015-03-15 16:03:03.676094877 -0500
@@ -30,7 +30,7 @@
sync_cache_w(&pen_release);
}
@@ -843,9 +843,9 @@ diff -Nur linux-3.18.8.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.8/arch/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.8.orig/arch/arm64/kernel/smp.c linux-3.18.8/arch/arm64/kernel/smp.c
---- linux-3.18.8.orig/arch/arm64/kernel/smp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/arm64/kernel/smp.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/arm64/kernel/smp.c linux-3.18.9/arch/arm64/kernel/smp.c
+--- linux-3.18.9.orig/arch/arm64/kernel/smp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/arm64/kernel/smp.c 2015-03-15 16:03:03.676094877 -0500
@@ -529,12 +529,14 @@
}
@@ -861,9 +861,9 @@ diff -Nur linux-3.18.8.orig/arch/arm64/kernel/smp.c linux-3.18.8/arch/arm64/kern
static DEFINE_RAW_SPINLOCK(stop_lock);
-diff -Nur linux-3.18.8.orig/arch/avr32/mm/fault.c linux-3.18.8/arch/avr32/mm/fault.c
---- linux-3.18.8.orig/arch/avr32/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/avr32/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/avr32/mm/fault.c linux-3.18.9/arch/avr32/mm/fault.c
+--- linux-3.18.9.orig/arch/avr32/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/avr32/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -81,7 +81,7 @@
* If we're in an interrupt or have no user context, we must
* not take the fault...
@@ -873,9 +873,9 @@ diff -Nur linux-3.18.8.orig/arch/avr32/mm/fault.c linux-3.18.8/arch/avr32/mm/fau
goto no_context;
local_irq_enable();
-diff -Nur linux-3.18.8.orig/arch/cris/mm/fault.c linux-3.18.8/arch/cris/mm/fault.c
---- linux-3.18.8.orig/arch/cris/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/cris/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/cris/mm/fault.c linux-3.18.9/arch/cris/mm/fault.c
+--- linux-3.18.9.orig/arch/cris/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/cris/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -113,7 +113,7 @@
* user context, we must not take the fault.
*/
@@ -885,9 +885,9 @@ diff -Nur linux-3.18.8.orig/arch/cris/mm/fault.c linux-3.18.8/arch/cris/mm/fault
goto no_context;
if (user_mode(regs))
-diff -Nur linux-3.18.8.orig/arch/frv/mm/fault.c linux-3.18.8/arch/frv/mm/fault.c
---- linux-3.18.8.orig/arch/frv/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/frv/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/frv/mm/fault.c linux-3.18.9/arch/frv/mm/fault.c
+--- linux-3.18.9.orig/arch/frv/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/frv/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -78,7 +78,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -897,9 +897,9 @@ diff -Nur linux-3.18.8.orig/arch/frv/mm/fault.c linux-3.18.8/arch/frv/mm/fault.c
goto no_context;
if (user_mode(__frame))
-diff -Nur linux-3.18.8.orig/arch/ia64/mm/fault.c linux-3.18.8/arch/ia64/mm/fault.c
---- linux-3.18.8.orig/arch/ia64/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/ia64/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/ia64/mm/fault.c linux-3.18.9/arch/ia64/mm/fault.c
+--- linux-3.18.9.orig/arch/ia64/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/ia64/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -96,7 +96,7 @@
/*
* If we're in an interrupt or have no user context, we must not take the fault..
@@ -909,9 +909,9 @@ diff -Nur linux-3.18.8.orig/arch/ia64/mm/fault.c linux-3.18.8/arch/ia64/mm/fault
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-diff -Nur linux-3.18.8.orig/arch/Kconfig linux-3.18.8/arch/Kconfig
---- linux-3.18.8.orig/arch/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/Kconfig 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/Kconfig linux-3.18.9/arch/Kconfig
+--- linux-3.18.9.orig/arch/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/Kconfig 2015-03-15 16:03:03.676094877 -0500
@@ -6,6 +6,7 @@
tristate "OProfile system profiling"
depends on PROFILING
@@ -920,9 +920,9 @@ diff -Nur linux-3.18.8.orig/arch/Kconfig linux-3.18.8/arch/Kconfig
select RING_BUFFER
select RING_BUFFER_ALLOW_SWAP
help
-diff -Nur linux-3.18.8.orig/arch/m32r/mm/fault.c linux-3.18.8/arch/m32r/mm/fault.c
---- linux-3.18.8.orig/arch/m32r/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/m32r/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/m32r/mm/fault.c linux-3.18.9/arch/m32r/mm/fault.c
+--- linux-3.18.9.orig/arch/m32r/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/m32r/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -114,7 +114,7 @@
* If we're in an interrupt or have no user context or are running in an
* atomic region then we must not take the fault..
@@ -932,9 +932,9 @@ diff -Nur linux-3.18.8.orig/arch/m32r/mm/fault.c linux-3.18.8/arch/m32r/mm/fault
goto bad_area_nosemaphore;
if (error_code & ACE_USERMODE)
-diff -Nur linux-3.18.8.orig/arch/m68k/mm/fault.c linux-3.18.8/arch/m68k/mm/fault.c
---- linux-3.18.8.orig/arch/m68k/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/m68k/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/m68k/mm/fault.c linux-3.18.9/arch/m68k/mm/fault.c
+--- linux-3.18.9.orig/arch/m68k/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/m68k/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -81,7 +81,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -944,9 +944,9 @@ diff -Nur linux-3.18.8.orig/arch/m68k/mm/fault.c linux-3.18.8/arch/m68k/mm/fault
goto no_context;
if (user_mode(regs))
-diff -Nur linux-3.18.8.orig/arch/microblaze/mm/fault.c linux-3.18.8/arch/microblaze/mm/fault.c
---- linux-3.18.8.orig/arch/microblaze/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/microblaze/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/microblaze/mm/fault.c linux-3.18.9/arch/microblaze/mm/fault.c
+--- linux-3.18.9.orig/arch/microblaze/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/microblaze/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -107,7 +107,7 @@
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
@@ -956,9 +956,9 @@ diff -Nur linux-3.18.8.orig/arch/microblaze/mm/fault.c linux-3.18.8/arch/microbl
if (kernel_mode(regs))
goto bad_area_nosemaphore;
-diff -Nur linux-3.18.8.orig/arch/mips/Kconfig linux-3.18.8/arch/mips/Kconfig
---- linux-3.18.8.orig/arch/mips/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/mips/Kconfig 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/mips/Kconfig linux-3.18.9/arch/mips/Kconfig
+--- linux-3.18.9.orig/arch/mips/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/mips/Kconfig 2015-03-15 16:03:03.676094877 -0500
@@ -2196,7 +2196,7 @@
#
config HIGHMEM
@@ -968,9 +968,9 @@ diff -Nur linux-3.18.8.orig/arch/mips/Kconfig linux-3.18.8/arch/mips/Kconfig
config CPU_SUPPORTS_HIGHMEM
bool
-diff -Nur linux-3.18.8.orig/arch/mips/kernel/signal.c linux-3.18.8/arch/mips/kernel/signal.c
---- linux-3.18.8.orig/arch/mips/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/mips/kernel/signal.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/mips/kernel/signal.c linux-3.18.9/arch/mips/kernel/signal.c
+--- linux-3.18.9.orig/arch/mips/kernel/signal.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/mips/kernel/signal.c 2015-03-15 16:03:03.676094877 -0500
@@ -613,6 +613,7 @@
__u32 thread_info_flags)
{
@@ -979,9 +979,9 @@ diff -Nur linux-3.18.8.orig/arch/mips/kernel/signal.c linux-3.18.8/arch/mips/ker
user_exit();
-diff -Nur linux-3.18.8.orig/arch/mips/mm/fault.c linux-3.18.8/arch/mips/mm/fault.c
---- linux-3.18.8.orig/arch/mips/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/mips/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/mips/mm/fault.c linux-3.18.9/arch/mips/mm/fault.c
+--- linux-3.18.9.orig/arch/mips/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/mips/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -89,7 +89,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -991,9 +991,9 @@ diff -Nur linux-3.18.8.orig/arch/mips/mm/fault.c linux-3.18.8/arch/mips/mm/fault
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-3.18.8.orig/arch/mips/mm/init.c linux-3.18.8/arch/mips/mm/init.c
---- linux-3.18.8.orig/arch/mips/mm/init.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/mips/mm/init.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/mips/mm/init.c linux-3.18.9/arch/mips/mm/init.c
+--- linux-3.18.9.orig/arch/mips/mm/init.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/mips/mm/init.c 2015-03-15 16:03:03.676094877 -0500
@@ -90,7 +90,7 @@
BUG_ON(Page_dcache_dirty(page));
@@ -1012,9 +1012,9 @@ diff -Nur linux-3.18.8.orig/arch/mips/mm/init.c linux-3.18.8/arch/mips/mm/init.c
}
void copy_user_highpage(struct page *to, struct page *from,
-diff -Nur linux-3.18.8.orig/arch/mn10300/mm/fault.c linux-3.18.8/arch/mn10300/mm/fault.c
---- linux-3.18.8.orig/arch/mn10300/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/mn10300/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/mn10300/mm/fault.c linux-3.18.9/arch/mn10300/mm/fault.c
+--- linux-3.18.9.orig/arch/mn10300/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/mn10300/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -168,7 +168,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1024,9 +1024,9 @@ diff -Nur linux-3.18.8.orig/arch/mn10300/mm/fault.c linux-3.18.8/arch/mn10300/mm
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-diff -Nur linux-3.18.8.orig/arch/parisc/mm/fault.c linux-3.18.8/arch/parisc/mm/fault.c
---- linux-3.18.8.orig/arch/parisc/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/parisc/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/parisc/mm/fault.c linux-3.18.9/arch/parisc/mm/fault.c
+--- linux-3.18.9.orig/arch/parisc/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/parisc/mm/fault.c 2015-03-15 16:03:03.676094877 -0500
@@ -207,7 +207,7 @@
int fault;
unsigned int flags;
@@ -1036,9 +1036,9 @@ diff -Nur linux-3.18.8.orig/arch/parisc/mm/fault.c linux-3.18.8/arch/parisc/mm/f
goto no_context;
tsk = current;
-diff -Nur linux-3.18.8.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.8/arch/powerpc/include/asm/thread_info.h
---- linux-3.18.8.orig/arch/powerpc/include/asm/thread_info.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/include/asm/thread_info.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.9/arch/powerpc/include/asm/thread_info.h
+--- linux-3.18.9.orig/arch/powerpc/include/asm/thread_info.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/include/asm/thread_info.h 2015-03-15 16:03:03.676094877 -0500
@@ -43,6 +43,8 @@
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
@@ -1085,9 +1085,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.8/
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
-diff -Nur linux-3.18.8.orig/arch/powerpc/Kconfig linux-3.18.8/arch/powerpc/Kconfig
---- linux-3.18.8.orig/arch/powerpc/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/Kconfig 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/Kconfig linux-3.18.9/arch/powerpc/Kconfig
+--- linux-3.18.9.orig/arch/powerpc/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/Kconfig 2015-03-15 16:03:03.680094877 -0500
@@ -60,10 +60,11 @@
config RWSEM_GENERIC_SPINLOCK
@@ -1118,9 +1118,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/Kconfig linux-3.18.8/arch/powerpc/Kconf
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.8/arch/powerpc/kernel/asm-offsets.c
---- linux-3.18.8.orig/arch/powerpc/kernel/asm-offsets.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/asm-offsets.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.9/arch/powerpc/kernel/asm-offsets.c
+--- linux-3.18.9.orig/arch/powerpc/kernel/asm-offsets.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/asm-offsets.c 2015-03-15 16:03:03.680094877 -0500
@@ -159,6 +159,7 @@
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -1129,9 +1129,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.8/arch/
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/entry_32.S linux-3.18.8/arch/powerpc/kernel/entry_32.S
---- linux-3.18.8.orig/arch/powerpc/kernel/entry_32.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/entry_32.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/entry_32.S linux-3.18.9/arch/powerpc/kernel/entry_32.S
+--- linux-3.18.9.orig/arch/powerpc/kernel/entry_32.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/entry_32.S 2015-03-15 16:03:03.680094877 -0500
@@ -890,7 +890,14 @@
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
@@ -1180,9 +1180,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/entry_32.S linux-3.18.8/arch/pow
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/entry_64.S linux-3.18.8/arch/powerpc/kernel/entry_64.S
---- linux-3.18.8.orig/arch/powerpc/kernel/entry_64.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/entry_64.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/entry_64.S linux-3.18.9/arch/powerpc/kernel/entry_64.S
+--- linux-3.18.9.orig/arch/powerpc/kernel/entry_64.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/entry_64.S 2015-03-15 16:03:03.680094877 -0500
@@ -644,7 +644,7 @@
#else
beq restore
@@ -1221,9 +1221,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/entry_64.S linux-3.18.8/arch/pow
bne 1b
/*
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/irq.c linux-3.18.8/arch/powerpc/kernel/irq.c
---- linux-3.18.8.orig/arch/powerpc/kernel/irq.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/irq.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/irq.c linux-3.18.9/arch/powerpc/kernel/irq.c
+--- linux-3.18.9.orig/arch/powerpc/kernel/irq.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/irq.c 2015-03-15 16:03:03.680094877 -0500
@@ -615,6 +615,7 @@
}
}
@@ -1240,9 +1240,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/irq.c linux-3.18.8/arch/powerpc/
irq_hw_number_t virq_to_hw(unsigned int virq)
{
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/misc_32.S linux-3.18.8/arch/powerpc/kernel/misc_32.S
---- linux-3.18.8.orig/arch/powerpc/kernel/misc_32.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/misc_32.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/misc_32.S linux-3.18.9/arch/powerpc/kernel/misc_32.S
+--- linux-3.18.9.orig/arch/powerpc/kernel/misc_32.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/misc_32.S 2015-03-15 16:03:03.680094877 -0500
@@ -40,6 +40,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
@@ -1259,9 +1259,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/misc_32.S linux-3.18.8/arch/powe
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/misc_64.S linux-3.18.8/arch/powerpc/kernel/misc_64.S
---- linux-3.18.8.orig/arch/powerpc/kernel/misc_64.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/misc_64.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/misc_64.S linux-3.18.9/arch/powerpc/kernel/misc_64.S
+--- linux-3.18.9.orig/arch/powerpc/kernel/misc_64.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/misc_64.S 2015-03-15 16:03:03.680094877 -0500
@@ -29,6 +29,7 @@
.text
@@ -1278,9 +1278,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/misc_64.S linux-3.18.8/arch/powe
_GLOBAL(call_do_irq)
mflr r0
-diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/time.c linux-3.18.8/arch/powerpc/kernel/time.c
---- linux-3.18.8.orig/arch/powerpc/kernel/time.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/kernel/time.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/kernel/time.c linux-3.18.9/arch/powerpc/kernel/time.c
+--- linux-3.18.9.orig/arch/powerpc/kernel/time.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/kernel/time.c 2015-03-15 16:03:03.680094877 -0500
@@ -424,7 +424,7 @@
EXPORT_SYMBOL(profile_pc);
#endif
@@ -1290,9 +1290,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/time.c linux-3.18.8/arch/powerpc
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
-diff -Nur linux-3.18.8.orig/arch/powerpc/mm/fault.c linux-3.18.8/arch/powerpc/mm/fault.c
---- linux-3.18.8.orig/arch/powerpc/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/powerpc/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/powerpc/mm/fault.c linux-3.18.9/arch/powerpc/mm/fault.c
+--- linux-3.18.9.orig/arch/powerpc/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/powerpc/mm/fault.c 2015-03-15 16:03:03.680094877 -0500
@@ -273,7 +273,7 @@
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
@@ -1302,9 +1302,9 @@ diff -Nur linux-3.18.8.orig/arch/powerpc/mm/fault.c linux-3.18.8/arch/powerpc/mm
if (!user_mode(regs)) {
rc = SIGSEGV;
goto bail;
-diff -Nur linux-3.18.8.orig/arch/s390/mm/fault.c linux-3.18.8/arch/s390/mm/fault.c
---- linux-3.18.8.orig/arch/s390/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/s390/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/s390/mm/fault.c linux-3.18.9/arch/s390/mm/fault.c
+--- linux-3.18.9.orig/arch/s390/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/s390/mm/fault.c 2015-03-15 16:03:03.680094877 -0500
@@ -435,7 +435,8 @@
* user context.
*/
@@ -1315,9 +1315,9 @@ diff -Nur linux-3.18.8.orig/arch/s390/mm/fault.c linux-3.18.8/arch/s390/mm/fault
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
-diff -Nur linux-3.18.8.orig/arch/score/mm/fault.c linux-3.18.8/arch/score/mm/fault.c
---- linux-3.18.8.orig/arch/score/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/score/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/score/mm/fault.c linux-3.18.9/arch/score/mm/fault.c
+--- linux-3.18.9.orig/arch/score/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/score/mm/fault.c 2015-03-15 16:03:03.680094877 -0500
@@ -73,7 +73,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1327,9 +1327,9 @@ diff -Nur linux-3.18.8.orig/arch/score/mm/fault.c linux-3.18.8/arch/score/mm/fau
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-3.18.8.orig/arch/sh/kernel/irq.c linux-3.18.8/arch/sh/kernel/irq.c
---- linux-3.18.8.orig/arch/sh/kernel/irq.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sh/kernel/irq.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sh/kernel/irq.c linux-3.18.9/arch/sh/kernel/irq.c
+--- linux-3.18.9.orig/arch/sh/kernel/irq.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sh/kernel/irq.c 2015-03-15 16:03:03.680094877 -0500
@@ -149,6 +149,7 @@
hardirq_ctx[cpu] = NULL;
}
@@ -1346,9 +1346,9 @@ diff -Nur linux-3.18.8.orig/arch/sh/kernel/irq.c linux-3.18.8/arch/sh/kernel/irq
#else
static inline void handle_one_irq(unsigned int irq)
{
-diff -Nur linux-3.18.8.orig/arch/sh/mm/fault.c linux-3.18.8/arch/sh/mm/fault.c
---- linux-3.18.8.orig/arch/sh/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sh/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sh/mm/fault.c linux-3.18.9/arch/sh/mm/fault.c
+--- linux-3.18.9.orig/arch/sh/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sh/mm/fault.c 2015-03-15 16:03:03.680094877 -0500
@@ -440,7 +440,7 @@
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
@@ -1358,9 +1358,9 @@ diff -Nur linux-3.18.8.orig/arch/sh/mm/fault.c linux-3.18.8/arch/sh/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-3.18.8.orig/arch/sparc/Kconfig linux-3.18.8/arch/sparc/Kconfig
---- linux-3.18.8.orig/arch/sparc/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/Kconfig 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/Kconfig linux-3.18.9/arch/sparc/Kconfig
+--- linux-3.18.9.orig/arch/sparc/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/Kconfig 2015-03-15 16:03:03.680094877 -0500
@@ -182,12 +182,10 @@
source kernel/Kconfig.hz
@@ -1387,9 +1387,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/Kconfig linux-3.18.8/arch/sparc/Kconfig
config COMPAT
bool
depends on SPARC64
-diff -Nur linux-3.18.8.orig/arch/sparc/kernel/irq_64.c linux-3.18.8/arch/sparc/kernel/irq_64.c
---- linux-3.18.8.orig/arch/sparc/kernel/irq_64.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/kernel/irq_64.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/kernel/irq_64.c linux-3.18.9/arch/sparc/kernel/irq_64.c
+--- linux-3.18.9.orig/arch/sparc/kernel/irq_64.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/kernel/irq_64.c 2015-03-15 16:03:03.680094877 -0500
@@ -849,6 +849,7 @@
set_irq_regs(old_regs);
}
@@ -1406,9 +1406,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/kernel/irq_64.c linux-3.18.8/arch/sparc/k
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
-diff -Nur linux-3.18.8.orig/arch/sparc/kernel/pcr.c linux-3.18.8/arch/sparc/kernel/pcr.c
---- linux-3.18.8.orig/arch/sparc/kernel/pcr.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/kernel/pcr.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/kernel/pcr.c linux-3.18.9/arch/sparc/kernel/pcr.c
+--- linux-3.18.9.orig/arch/sparc/kernel/pcr.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/kernel/pcr.c 2015-03-15 16:03:03.684094876 -0500
@@ -43,10 +43,12 @@
set_irq_regs(old_regs);
}
@@ -1422,9 +1422,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/kernel/pcr.c linux-3.18.8/arch/sparc/kern
const struct pcr_ops *pcr_ops;
EXPORT_SYMBOL_GPL(pcr_ops);
-diff -Nur linux-3.18.8.orig/arch/sparc/kernel/setup_32.c linux-3.18.8/arch/sparc/kernel/setup_32.c
---- linux-3.18.8.orig/arch/sparc/kernel/setup_32.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/kernel/setup_32.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/kernel/setup_32.c linux-3.18.9/arch/sparc/kernel/setup_32.c
+--- linux-3.18.9.orig/arch/sparc/kernel/setup_32.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/kernel/setup_32.c 2015-03-15 16:03:03.684094876 -0500
@@ -309,6 +309,7 @@
boot_flags_init(*cmdline_p);
@@ -1433,9 +1433,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/kernel/setup_32.c linux-3.18.8/arch/sparc
register_console(&prom_early_console);
printk("ARCH: ");
-diff -Nur linux-3.18.8.orig/arch/sparc/kernel/setup_64.c linux-3.18.8/arch/sparc/kernel/setup_64.c
---- linux-3.18.8.orig/arch/sparc/kernel/setup_64.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/kernel/setup_64.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/kernel/setup_64.c linux-3.18.9/arch/sparc/kernel/setup_64.c
+--- linux-3.18.9.orig/arch/sparc/kernel/setup_64.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/kernel/setup_64.c 2015-03-15 16:03:03.684094876 -0500
@@ -563,6 +563,12 @@
pause_patch();
}
@@ -1458,9 +1458,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/kernel/setup_64.c linux-3.18.8/arch/sparc
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");
-diff -Nur linux-3.18.8.orig/arch/sparc/mm/fault_32.c linux-3.18.8/arch/sparc/mm/fault_32.c
---- linux-3.18.8.orig/arch/sparc/mm/fault_32.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/mm/fault_32.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/mm/fault_32.c linux-3.18.9/arch/sparc/mm/fault_32.c
+--- linux-3.18.9.orig/arch/sparc/mm/fault_32.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/mm/fault_32.c 2015-03-15 16:03:03.684094876 -0500
@@ -196,7 +196,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1470,9 +1470,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/mm/fault_32.c linux-3.18.8/arch/sparc/mm/
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-3.18.8.orig/arch/sparc/mm/fault_64.c linux-3.18.8/arch/sparc/mm/fault_64.c
---- linux-3.18.8.orig/arch/sparc/mm/fault_64.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/sparc/mm/fault_64.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/sparc/mm/fault_64.c linux-3.18.9/arch/sparc/mm/fault_64.c
+--- linux-3.18.9.orig/arch/sparc/mm/fault_64.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/sparc/mm/fault_64.c 2015-03-15 16:03:03.684094876 -0500
@@ -330,7 +330,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1482,9 +1482,9 @@ diff -Nur linux-3.18.8.orig/arch/sparc/mm/fault_64.c linux-3.18.8/arch/sparc/mm/
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-3.18.8.orig/arch/tile/mm/fault.c linux-3.18.8/arch/tile/mm/fault.c
---- linux-3.18.8.orig/arch/tile/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/tile/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/tile/mm/fault.c linux-3.18.9/arch/tile/mm/fault.c
+--- linux-3.18.9.orig/arch/tile/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/tile/mm/fault.c 2015-03-15 16:03:03.684094876 -0500
@@ -357,7 +357,7 @@
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
@@ -1494,9 +1494,9 @@ diff -Nur linux-3.18.8.orig/arch/tile/mm/fault.c linux-3.18.8/arch/tile/mm/fault
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
-diff -Nur linux-3.18.8.orig/arch/um/kernel/trap.c linux-3.18.8/arch/um/kernel/trap.c
---- linux-3.18.8.orig/arch/um/kernel/trap.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/um/kernel/trap.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/um/kernel/trap.c linux-3.18.9/arch/um/kernel/trap.c
+--- linux-3.18.9.orig/arch/um/kernel/trap.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/um/kernel/trap.c 2015-03-15 16:03:03.684094876 -0500
@@ -38,7 +38,7 @@
* If the fault was during atomic operation, don't take the fault, just
* fail.
@@ -1506,9 +1506,9 @@ diff -Nur linux-3.18.8.orig/arch/um/kernel/trap.c linux-3.18.8/arch/um/kernel/tr
goto out_nosemaphore;
if (is_user)
-diff -Nur linux-3.18.8.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.8/arch/x86/crypto/aesni-intel_glue.c
---- linux-3.18.8.orig/arch/x86/crypto/aesni-intel_glue.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/crypto/aesni-intel_glue.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.9/arch/x86/crypto/aesni-intel_glue.c
+--- linux-3.18.9.orig/arch/x86/crypto/aesni-intel_glue.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/crypto/aesni-intel_glue.c 2015-03-15 16:03:03.684094876 -0500
@@ -381,14 +381,14 @@
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -1601,9 +1601,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.8/arch
return err;
}
-diff -Nur linux-3.18.8.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.8/arch/x86/crypto/cast5_avx_glue.c
---- linux-3.18.8.orig/arch/x86/crypto/cast5_avx_glue.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/crypto/cast5_avx_glue.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.9/arch/x86/crypto/cast5_avx_glue.c
+--- linux-3.18.9.orig/arch/x86/crypto/cast5_avx_glue.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/crypto/cast5_avx_glue.c 2015-03-15 16:03:03.684094876 -0500
@@ -60,7 +60,7 @@
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
@@ -1683,9 +1683,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.8/arch/x
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
-diff -Nur linux-3.18.8.orig/arch/x86/crypto/glue_helper.c linux-3.18.8/arch/x86/crypto/glue_helper.c
---- linux-3.18.8.orig/arch/x86/crypto/glue_helper.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/crypto/glue_helper.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/crypto/glue_helper.c linux-3.18.9/arch/x86/crypto/glue_helper.c
+--- linux-3.18.9.orig/arch/x86/crypto/glue_helper.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/crypto/glue_helper.c 2015-03-15 16:03:03.684094876 -0500
@@ -39,7 +39,7 @@
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
@@ -1801,9 +1801,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/crypto/glue_helper.c linux-3.18.8/arch/x86/
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
-diff -Nur linux-3.18.8.orig/arch/x86/include/asm/preempt.h linux-3.18.8/arch/x86/include/asm/preempt.h
---- linux-3.18.8.orig/arch/x86/include/asm/preempt.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/include/asm/preempt.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/include/asm/preempt.h linux-3.18.9/arch/x86/include/asm/preempt.h
+--- linux-3.18.9.orig/arch/x86/include/asm/preempt.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/include/asm/preempt.h 2015-03-15 16:03:03.684094876 -0500
@@ -85,17 +85,33 @@
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
@@ -1839,9 +1839,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/include/asm/preempt.h linux-3.18.8/arch/x86
}
#ifdef CONFIG_PREEMPT
-diff -Nur linux-3.18.8.orig/arch/x86/include/asm/signal.h linux-3.18.8/arch/x86/include/asm/signal.h
---- linux-3.18.8.orig/arch/x86/include/asm/signal.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/include/asm/signal.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/include/asm/signal.h linux-3.18.9/arch/x86/include/asm/signal.h
+--- linux-3.18.9.orig/arch/x86/include/asm/signal.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/include/asm/signal.h 2015-03-15 16:03:03.684094876 -0500
@@ -23,6 +23,19 @@
unsigned long sig[_NSIG_WORDS];
} sigset_t;
@@ -1862,9 +1862,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/include/asm/signal.h linux-3.18.8/arch/x86/
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
-diff -Nur linux-3.18.8.orig/arch/x86/include/asm/stackprotector.h linux-3.18.8/arch/x86/include/asm/stackprotector.h
---- linux-3.18.8.orig/arch/x86/include/asm/stackprotector.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/include/asm/stackprotector.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/include/asm/stackprotector.h linux-3.18.9/arch/x86/include/asm/stackprotector.h
+--- linux-3.18.9.orig/arch/x86/include/asm/stackprotector.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/include/asm/stackprotector.h 2015-03-15 16:03:03.684094876 -0500
@@ -57,7 +57,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
@@ -1891,9 +1891,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/include/asm/stackprotector.h linux-3.18.8/a
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);
-diff -Nur linux-3.18.8.orig/arch/x86/include/asm/thread_info.h linux-3.18.8/arch/x86/include/asm/thread_info.h
---- linux-3.18.8.orig/arch/x86/include/asm/thread_info.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/include/asm/thread_info.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/include/asm/thread_info.h linux-3.18.9/arch/x86/include/asm/thread_info.h
+--- linux-3.18.9.orig/arch/x86/include/asm/thread_info.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/include/asm/thread_info.h 2015-03-15 16:03:03.684094876 -0500
@@ -30,6 +30,8 @@
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
@@ -1928,9 +1928,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/include/asm/thread_info.h linux-3.18.8/arch
#define STACK_WARN (THREAD_SIZE/8)
#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
-diff -Nur linux-3.18.8.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.8/arch/x86/include/asm/uv/uv_bau.h
---- linux-3.18.8.orig/arch/x86/include/asm/uv/uv_bau.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/include/asm/uv/uv_bau.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.9/arch/x86/include/asm/uv/uv_bau.h
+--- linux-3.18.9.orig/arch/x86/include/asm/uv/uv_bau.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/include/asm/uv/uv_bau.h 2015-03-15 16:03:03.684094876 -0500
@@ -615,9 +615,9 @@
cycles_t send_message;
cycles_t period_end;
@@ -1964,9 +1964,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.8/arch/x
return 1;
}
-diff -Nur linux-3.18.8.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.8/arch/x86/include/asm/uv/uv_hub.h
---- linux-3.18.8.orig/arch/x86/include/asm/uv/uv_hub.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/include/asm/uv/uv_hub.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.9/arch/x86/include/asm/uv/uv_hub.h
+--- linux-3.18.9.orig/arch/x86/include/asm/uv/uv_hub.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/include/asm/uv/uv_hub.h 2015-03-15 16:03:03.684094876 -0500
@@ -492,7 +492,7 @@
unsigned short nr_online_cpus;
unsigned short pnode;
@@ -1976,9 +1976,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.8/arch/x
unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
-diff -Nur linux-3.18.8.orig/arch/x86/Kconfig linux-3.18.8/arch/x86/Kconfig
---- linux-3.18.8.orig/arch/x86/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/Kconfig 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/Kconfig linux-3.18.9/arch/x86/Kconfig
+--- linux-3.18.9.orig/arch/x86/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/Kconfig 2015-03-15 16:03:03.684094876 -0500
@@ -21,6 +21,7 @@
### Arch settings
config X86
@@ -2009,9 +2009,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/Kconfig linux-3.18.8/arch/x86/Kconfig
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.8/arch/x86/kernel/apic/io_apic.c
---- linux-3.18.8.orig/arch/x86/kernel/apic/io_apic.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/apic/io_apic.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.9/arch/x86/kernel/apic/io_apic.c
+--- linux-3.18.9.orig/arch/x86/kernel/apic/io_apic.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/apic/io_apic.c 2015-03-15 16:03:03.688094875 -0500
@@ -2494,7 +2494,8 @@
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
@@ -2022,9 +2022,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.8/arch/x86
mask_ioapic(cfg);
return true;
}
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.8/arch/x86/kernel/apic/x2apic_uv_x.c
---- linux-3.18.8.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/apic/x2apic_uv_x.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.9/arch/x86/kernel/apic/x2apic_uv_x.c
+--- linux-3.18.9.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/apic/x2apic_uv_x.c 2015-03-15 16:03:03.688094875 -0500
@@ -918,7 +918,7 @@
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
@@ -2034,9 +2034,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.8/arch
min_pnode = min(pnode, min_pnode);
max_pnode = max(pnode, max_pnode);
blade++;
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/asm-offsets.c linux-3.18.8/arch/x86/kernel/asm-offsets.c
---- linux-3.18.8.orig/arch/x86/kernel/asm-offsets.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/asm-offsets.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/asm-offsets.c linux-3.18.9/arch/x86/kernel/asm-offsets.c
+--- linux-3.18.9.orig/arch/x86/kernel/asm-offsets.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/asm-offsets.c 2015-03-15 16:03:03.688094875 -0500
@@ -32,6 +32,7 @@
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
@@ -2051,9 +2051,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/asm-offsets.c linux-3.18.8/arch/x86/
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.8/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-3.18.8.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/cpu/mcheck/mce.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.9/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-3.18.9.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/cpu/mcheck/mce.c 2015-03-15 16:03:03.688094875 -0500
@@ -18,6 +18,7 @@
#include <linux/rcupdate.h>
#include <linux/kobject.h>
@@ -2330,9 +2330,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.8/arch/x
err_register:
unregister_syscore_ops(&mce_syscore_ops);
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/entry_32.S linux-3.18.8/arch/x86/kernel/entry_32.S
---- linux-3.18.8.orig/arch/x86/kernel/entry_32.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/entry_32.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/entry_32.S linux-3.18.9/arch/x86/kernel/entry_32.S
+--- linux-3.18.9.orig/arch/x86/kernel/entry_32.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/entry_32.S 2015-03-15 16:03:03.688094875 -0500
@@ -359,8 +359,24 @@
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -2376,9 +2376,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/entry_32.S linux-3.18.8/arch/x86/ker
jnz work_resched
work_notifysig: # deal with pending signals and
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/entry_64.S linux-3.18.8/arch/x86/kernel/entry_64.S
---- linux-3.18.8.orig/arch/x86/kernel/entry_64.S 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/entry_64.S 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/entry_64.S linux-3.18.9/arch/x86/kernel/entry_64.S
+--- linux-3.18.9.orig/arch/x86/kernel/entry_64.S 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/entry_64.S 2015-03-15 16:03:03.688094875 -0500
@@ -451,8 +451,8 @@
/* Handle reschedules */
/* edx: work, edi: workmask */
@@ -2460,9 +2460,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/entry_64.S linux-3.18.8/arch/x86/ker
jnz paranoid_schedule
movl %ebx,%edx /* arg3: thread flags */
TRACE_IRQS_ON
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/irq_32.c linux-3.18.8/arch/x86/kernel/irq_32.c
---- linux-3.18.8.orig/arch/x86/kernel/irq_32.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/irq_32.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/irq_32.c linux-3.18.9/arch/x86/kernel/irq_32.c
+--- linux-3.18.9.orig/arch/x86/kernel/irq_32.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/irq_32.c 2015-03-15 16:03:03.688094875 -0500
@@ -142,6 +142,7 @@
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
@@ -2479,9 +2479,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/irq_32.c linux-3.18.8/arch/x86/kerne
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/irq_work.c linux-3.18.8/arch/x86/kernel/irq_work.c
---- linux-3.18.8.orig/arch/x86/kernel/irq_work.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/irq_work.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/irq_work.c linux-3.18.9/arch/x86/kernel/irq_work.c
+--- linux-3.18.9.orig/arch/x86/kernel/irq_work.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/irq_work.c 2015-03-15 16:03:03.688094875 -0500
@@ -38,6 +38,7 @@
exiting_irq();
}
@@ -2495,9 +2495,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/irq_work.c linux-3.18.8/arch/x86/ker
#endif
}
+#endif
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/process_32.c linux-3.18.8/arch/x86/kernel/process_32.c
---- linux-3.18.8.orig/arch/x86/kernel/process_32.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/process_32.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/process_32.c linux-3.18.9/arch/x86/kernel/process_32.c
+--- linux-3.18.9.orig/arch/x86/kernel/process_32.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/process_32.c 2015-03-15 16:03:03.688094875 -0500
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -2551,9 +2551,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/process_32.c linux-3.18.8/arch/x86/k
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/signal.c linux-3.18.8/arch/x86/kernel/signal.c
---- linux-3.18.8.orig/arch/x86/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/signal.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/signal.c linux-3.18.9/arch/x86/kernel/signal.c
+--- linux-3.18.9.orig/arch/x86/kernel/signal.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/signal.c 2015-03-15 16:03:03.688094875 -0500
@@ -746,6 +746,14 @@
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
@@ -2569,9 +2569,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/signal.c linux-3.18.8/arch/x86/kerne
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
-diff -Nur linux-3.18.8.orig/arch/x86/kernel/traps.c linux-3.18.8/arch/x86/kernel/traps.c
---- linux-3.18.8.orig/arch/x86/kernel/traps.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kernel/traps.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kernel/traps.c linux-3.18.9/arch/x86/kernel/traps.c
+--- linux-3.18.9.orig/arch/x86/kernel/traps.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kernel/traps.c 2015-03-15 16:03:03.688094875 -0500
@@ -87,9 +87,21 @@
local_irq_enable();
}
@@ -2646,10 +2646,10 @@ diff -Nur linux-3.18.8.orig/arch/x86/kernel/traps.c linux-3.18.8/arch/x86/kernel
debug_stack_usage_dec();
exit:
-diff -Nur linux-3.18.8.orig/arch/x86/kvm/x86.c linux-3.18.8/arch/x86/kvm/x86.c
---- linux-3.18.8.orig/arch/x86/kvm/x86.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/kvm/x86.c 2015-03-03 08:05:17.000000000 +0100
-@@ -5772,6 +5772,13 @@
+diff -Nur linux-3.18.9.orig/arch/x86/kvm/x86.c linux-3.18.9/arch/x86/kvm/x86.c
+--- linux-3.18.9.orig/arch/x86/kvm/x86.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/kvm/x86.c 2015-03-15 16:03:03.696094875 -0500
+@@ -5773,6 +5773,13 @@
goto out;
}
@@ -2663,9 +2663,7837 @@ diff -Nur linux-3.18.8.orig/arch/x86/kvm/x86.c linux-3.18.8/arch/x86/kvm/x86.c
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
-diff -Nur linux-3.18.8.orig/arch/x86/mm/fault.c linux-3.18.8/arch/x86/mm/fault.c
---- linux-3.18.8.orig/arch/x86/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/kvm/x86.c.orig linux-3.18.9/arch/x86/kvm/x86.c.orig
+--- linux-3.18.9.orig/arch/x86/kvm/x86.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/arch/x86/kvm/x86.c.orig 2015-03-06 16:53:42.000000000 -0600
+@@ -0,0 +1,7824 @@
++/*
++ * Kernel-based Virtual Machine driver for Linux
++ *
++ * derived from drivers/kvm/kvm_main.c
++ *
++ * Copyright (C) 2006 Qumranet, Inc.
++ * Copyright (C) 2008 Qumranet, Inc.
++ * Copyright IBM Corporation, 2008
++ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
++ *
++ * Authors:
++ * Avi Kivity <avi@qumranet.com>
++ * Yaniv Kamay <yaniv@qumranet.com>
++ * Amit Shah <amit.shah@qumranet.com>
++ * Ben-Ami Yassour <benami@il.ibm.com>
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2. See
++ * the COPYING file in the top-level directory.
++ *
++ */
++
++#include <linux/kvm_host.h>
++#include "irq.h"
++#include "mmu.h"
++#include "i8254.h"
++#include "tss.h"
++#include "kvm_cache_regs.h"
++#include "x86.h"
++#include "cpuid.h"
++
++#include <linux/clocksource.h>
++#include <linux/interrupt.h>
++#include <linux/kvm.h>
++#include <linux/fs.h>
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/highmem.h>
++#include <linux/iommu.h>
++#include <linux/intel-iommu.h>
++#include <linux/cpufreq.h>
++#include <linux/user-return-notifier.h>
++#include <linux/srcu.h>
++#include <linux/slab.h>
++#include <linux/perf_event.h>
++#include <linux/uaccess.h>
++#include <linux/hash.h>
++#include <linux/pci.h>
++#include <linux/timekeeper_internal.h>
++#include <linux/pvclock_gtod.h>
++#include <trace/events/kvm.h>
++
++#define CREATE_TRACE_POINTS
++#include "trace.h"
++
++#include <asm/debugreg.h>
++#include <asm/msr.h>
++#include <asm/desc.h>
++#include <asm/mtrr.h>
++#include <asm/mce.h>
++#include <asm/i387.h>
++#include <asm/fpu-internal.h> /* Ugh! */
++#include <asm/xcr.h>
++#include <asm/pvclock.h>
++#include <asm/div64.h>
++
++#define MAX_IO_MSRS 256
++#define KVM_MAX_MCE_BANKS 32
++#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
++
++#define emul_to_vcpu(ctxt) \
++ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
++
++/* EFER defaults:
++ * - enable syscall by default because it is emulated by KVM
++ * - enable LME and LMA by default on 64-bit KVM
++ */
++#ifdef CONFIG_X86_64
++static
++u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
++#else
++static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
++#endif
++
++#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
++#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
++
++static void update_cr8_intercept(struct kvm_vcpu *vcpu);
++static void process_nmi(struct kvm_vcpu *vcpu);
++static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
++
++struct kvm_x86_ops *kvm_x86_ops;
++EXPORT_SYMBOL_GPL(kvm_x86_ops);
++
++static bool ignore_msrs = 0;
++module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
++
++unsigned int min_timer_period_us = 500;
++module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
++
++bool kvm_has_tsc_control;
++EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
++u32 kvm_max_guest_tsc_khz;
++EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
++
++/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
++static u32 tsc_tolerance_ppm = 250;
++module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
++
++static bool backwards_tsc_observed = false;
++
++#define KVM_NR_SHARED_MSRS 16
++
++struct kvm_shared_msrs_global {
++ int nr;
++ u32 msrs[KVM_NR_SHARED_MSRS];
++};
++
++struct kvm_shared_msrs {
++ struct user_return_notifier urn;
++ bool registered;
++ struct kvm_shared_msr_values {
++ u64 host;
++ u64 curr;
++ } values[KVM_NR_SHARED_MSRS];
++};
++
++static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
++static struct kvm_shared_msrs __percpu *shared_msrs;
++
++struct kvm_stats_debugfs_item debugfs_entries[] = {
++ { "pf_fixed", VCPU_STAT(pf_fixed) },
++ { "pf_guest", VCPU_STAT(pf_guest) },
++ { "tlb_flush", VCPU_STAT(tlb_flush) },
++ { "invlpg", VCPU_STAT(invlpg) },
++ { "exits", VCPU_STAT(exits) },
++ { "io_exits", VCPU_STAT(io_exits) },
++ { "mmio_exits", VCPU_STAT(mmio_exits) },
++ { "signal_exits", VCPU_STAT(signal_exits) },
++ { "irq_window", VCPU_STAT(irq_window_exits) },
++ { "nmi_window", VCPU_STAT(nmi_window_exits) },
++ { "halt_exits", VCPU_STAT(halt_exits) },
++ { "halt_wakeup", VCPU_STAT(halt_wakeup) },
++ { "hypercalls", VCPU_STAT(hypercalls) },
++ { "request_irq", VCPU_STAT(request_irq_exits) },
++ { "irq_exits", VCPU_STAT(irq_exits) },
++ { "host_state_reload", VCPU_STAT(host_state_reload) },
++ { "efer_reload", VCPU_STAT(efer_reload) },
++ { "fpu_reload", VCPU_STAT(fpu_reload) },
++ { "insn_emulation", VCPU_STAT(insn_emulation) },
++ { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
++ { "irq_injections", VCPU_STAT(irq_injections) },
++ { "nmi_injections", VCPU_STAT(nmi_injections) },
++ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
++ { "mmu_pte_write", VM_STAT(mmu_pte_write) },
++ { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
++ { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
++ { "mmu_flooded", VM_STAT(mmu_flooded) },
++ { "mmu_recycled", VM_STAT(mmu_recycled) },
++ { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
++ { "mmu_unsync", VM_STAT(mmu_unsync) },
++ { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
++ { "largepages", VM_STAT(lpages) },
++ { NULL }
++};
++
++u64 __read_mostly host_xcr0;
++
++static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
++
++static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
++{
++ int i;
++ for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
++ vcpu->arch.apf.gfns[i] = ~0;
++}
++
++static void kvm_on_user_return(struct user_return_notifier *urn)
++{
++ unsigned slot;
++ struct kvm_shared_msrs *locals
++ = container_of(urn, struct kvm_shared_msrs, urn);
++ struct kvm_shared_msr_values *values;
++
++ for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
++ values = &locals->values[slot];
++ if (values->host != values->curr) {
++ wrmsrl(shared_msrs_global.msrs[slot], values->host);
++ values->curr = values->host;
++ }
++ }
++ locals->registered = false;
++ user_return_notifier_unregister(urn);
++}
++
++static void shared_msr_update(unsigned slot, u32 msr)
++{
++ u64 value;
++ unsigned int cpu = smp_processor_id();
++ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++
++	/* only read; nobody should modify it at this time,
++	 * so no lock is needed */
++ if (slot >= shared_msrs_global.nr) {
++ printk(KERN_ERR "kvm: invalid MSR slot!");
++ return;
++ }
++ rdmsrl_safe(msr, &value);
++ smsr->values[slot].host = value;
++ smsr->values[slot].curr = value;
++}
++
++void kvm_define_shared_msr(unsigned slot, u32 msr)
++{
++ BUG_ON(slot >= KVM_NR_SHARED_MSRS);
++ if (slot >= shared_msrs_global.nr)
++ shared_msrs_global.nr = slot + 1;
++ shared_msrs_global.msrs[slot] = msr;
++	/* we need to ensure shared_msrs_global has been updated */
++ smp_wmb();
++}
++EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
++
++static void kvm_shared_msr_cpu_online(void)
++{
++ unsigned i;
++
++ for (i = 0; i < shared_msrs_global.nr; ++i)
++ shared_msr_update(i, shared_msrs_global.msrs[i]);
++}
++
++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
++{
++ unsigned int cpu = smp_processor_id();
++ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++ int err;
++
++ if (((value ^ smsr->values[slot].curr) & mask) == 0)
++ return 0;
++ smsr->values[slot].curr = value;
++ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
++ if (err)
++ return 1;
++
++ if (!smsr->registered) {
++ smsr->urn.on_user_return = kvm_on_user_return;
++ user_return_notifier_register(&smsr->urn);
++ smsr->registered = true;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
++
++static void drop_user_return_notifiers(void)
++{
++ unsigned int cpu = smp_processor_id();
++ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++
++ if (smsr->registered)
++ kvm_on_user_return(&smsr->urn);
++}
++
++u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
++{
++ return vcpu->arch.apic_base;
++}
++EXPORT_SYMBOL_GPL(kvm_get_apic_base);
++
++int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
++{
++ u64 old_state = vcpu->arch.apic_base &
++ (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
++ u64 new_state = msr_info->data &
++ (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
++ u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
++ 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
++
++ if (!msr_info->host_initiated &&
++ ((msr_info->data & reserved_bits) != 0 ||
++ new_state == X2APIC_ENABLE ||
++ (new_state == MSR_IA32_APICBASE_ENABLE &&
++ old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
++ (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
++ old_state == 0)))
++ return 1;
++
++ kvm_lapic_set_base(vcpu, msr_info->data);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_apic_base);
++
++asmlinkage __visible void kvm_spurious_fault(void)
++{
++ /* Fault while not rebooting. We want the trace. */
++ BUG();
++}
++EXPORT_SYMBOL_GPL(kvm_spurious_fault);
++
++#define EXCPT_BENIGN 0
++#define EXCPT_CONTRIBUTORY 1
++#define EXCPT_PF 2
++
++static int exception_class(int vector)
++{
++ switch (vector) {
++ case PF_VECTOR:
++ return EXCPT_PF;
++ case DE_VECTOR:
++ case TS_VECTOR:
++ case NP_VECTOR:
++ case SS_VECTOR:
++ case GP_VECTOR:
++ return EXCPT_CONTRIBUTORY;
++ default:
++ break;
++ }
++ return EXCPT_BENIGN;
++}
++
++#define EXCPT_FAULT 0
++#define EXCPT_TRAP 1
++#define EXCPT_ABORT 2
++#define EXCPT_INTERRUPT 3
++
++static int exception_type(int vector)
++{
++ unsigned int mask;
++
++ if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
++ return EXCPT_INTERRUPT;
++
++ mask = 1 << vector;
++
++ /* #DB is trap, as instruction watchpoints are handled elsewhere */
++ if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
++ return EXCPT_TRAP;
++
++ if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
++ return EXCPT_ABORT;
++
++ /* Reserved exceptions will result in fault */
++ return EXCPT_FAULT;
++}
++
++static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
++ unsigned nr, bool has_error, u32 error_code,
++ bool reinject)
++{
++ u32 prev_nr;
++ int class1, class2;
++
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++
++ if (!vcpu->arch.exception.pending) {
++ queue:
++ vcpu->arch.exception.pending = true;
++ vcpu->arch.exception.has_error_code = has_error;
++ vcpu->arch.exception.nr = nr;
++ vcpu->arch.exception.error_code = error_code;
++ vcpu->arch.exception.reinject = reinject;
++ return;
++ }
++
++	/* a previous exception is pending; decide how to combine them */
++ prev_nr = vcpu->arch.exception.nr;
++ if (prev_nr == DF_VECTOR) {
++ /* triple fault -> shutdown */
++ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
++ return;
++ }
++ class1 = exception_class(prev_nr);
++ class2 = exception_class(nr);
++ if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
++ || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
++ /* generate double fault per SDM Table 5-5 */
++ vcpu->arch.exception.pending = true;
++ vcpu->arch.exception.has_error_code = true;
++ vcpu->arch.exception.nr = DF_VECTOR;
++ vcpu->arch.exception.error_code = 0;
++ } else
++		/* replace the previous exception with the new one in the
++		   hope that instruction re-execution will regenerate the
++		   lost exception */
++ goto queue;
++}
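++
++/*
++ * For reference, the combination rules implemented above (per SDM
++ * Table 5-5):
++ *
++ *  first \ second  | benign    | contributory | #PF
++ *  ----------------+-----------+--------------+-----------
++ *  benign          | queue 2nd | queue 2nd    | queue 2nd
++ *  contributory    | queue 2nd | #DF          | queue 2nd
++ *  #PF             | queue 2nd | #DF          | #DF
++ *
++ * A second exception arriving while #DF is already pending escalates
++ * to a triple fault via KVM_REQ_TRIPLE_FAULT.
++ */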
++
++void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
++{
++ kvm_multiple_exception(vcpu, nr, false, 0, false);
++}
++EXPORT_SYMBOL_GPL(kvm_queue_exception);
++
++void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
++{
++ kvm_multiple_exception(vcpu, nr, false, 0, true);
++}
++EXPORT_SYMBOL_GPL(kvm_requeue_exception);
++
++void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
++{
++ if (err)
++ kvm_inject_gp(vcpu, 0);
++ else
++ kvm_x86_ops->skip_emulated_instruction(vcpu);
++}
++EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
++
++void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
++{
++ ++vcpu->stat.pf_guest;
++ vcpu->arch.cr2 = fault->address;
++ kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
++}
++EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
++
++static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
++{
++ if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
++ vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
++ else
++ vcpu->arch.mmu.inject_page_fault(vcpu, fault);
++
++ return fault->nested_page_fault;
++}
++
++void kvm_inject_nmi(struct kvm_vcpu *vcpu)
++{
++ atomic_inc(&vcpu->arch.nmi_queued);
++ kvm_make_request(KVM_REQ_NMI, vcpu);
++}
++EXPORT_SYMBOL_GPL(kvm_inject_nmi);
++
++void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
++{
++ kvm_multiple_exception(vcpu, nr, true, error_code, false);
++}
++EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
++
++void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
++{
++ kvm_multiple_exception(vcpu, nr, true, error_code, true);
++}
++EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
++
++/*
++ * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
++ * a #GP and return false.
++ */
++bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
++{
++ if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
++ return true;
++ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
++ return false;
++}
++EXPORT_SYMBOL_GPL(kvm_require_cpl);
++
++/*
++ * This function will be used to read from the physical memory of the currently
++ * running guest. The difference from kvm_read_guest_page is that this function
++ * can read from guest physical or from the guest's guest physical memory.
++ */
++int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
++ gfn_t ngfn, void *data, int offset, int len,
++ u32 access)
++{
++ struct x86_exception exception;
++ gfn_t real_gfn;
++ gpa_t ngpa;
++
++ ngpa = gfn_to_gpa(ngfn);
++ real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception);
++ if (real_gfn == UNMAPPED_GVA)
++ return -EFAULT;
++
++ real_gfn = gpa_to_gfn(real_gfn);
++
++ return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
++}
++EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
++
++int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
++ void *data, int offset, int len, u32 access)
++{
++ return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
++ data, offset, len, access);
++}
++
++/*
++ * Load the PAE pdptrs. Return true if they are all valid.
++ */
++int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
++{
++ gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
++ unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
++ int i;
++ int ret;
++ u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
++
++ ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
++ offset * sizeof(u64), sizeof(pdpte),
++ PFERR_USER_MASK|PFERR_WRITE_MASK);
++ if (ret < 0) {
++ ret = 0;
++ goto out;
++ }
++ for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
++ if (is_present_gpte(pdpte[i]) &&
++ (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
++ ret = 0;
++ goto out;
++ }
++ }
++ ret = 1;
++
++ memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
++ __set_bit(VCPU_EXREG_PDPTR,
++ (unsigned long *)&vcpu->arch.regs_avail);
++ __set_bit(VCPU_EXREG_PDPTR,
++ (unsigned long *)&vcpu->arch.regs_dirty);
++out:
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(load_pdptrs);
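++
++/*
++ * Worked example for the offset arithmetic in load_pdptrs(): in PAE
++ * mode CR3 bits 31:5 locate a 32-byte-aligned table of four 8-byte
++ * PDPTEs. For cr3 = 0x12345e0:
++ *
++ *	pdpt_gfn = 0x12345e0 >> PAGE_SHIFT = 0x1234
++ *	offset   = (0x5e0 >> 5) << 2       = 0xbc   (in u64 units)
++ *	offset * sizeof(u64)               = 0x5e0  = cr3 & 0xfe0
++ *
++ * i.e. the read lands exactly on the 32-byte slot CR3 points at.
++ */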
++
++static bool pdptrs_changed(struct kvm_vcpu *vcpu)
++{
++ u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
++ bool changed = true;
++ int offset;
++ gfn_t gfn;
++ int r;
++
++ if (is_long_mode(vcpu) || !is_pae(vcpu))
++ return false;
++
++ if (!test_bit(VCPU_EXREG_PDPTR,
++ (unsigned long *)&vcpu->arch.regs_avail))
++ return true;
++
++ gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
++ offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
++ r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
++ PFERR_USER_MASK | PFERR_WRITE_MASK);
++ if (r < 0)
++ goto out;
++ changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
++out:
++
++ return changed;
++}
++
++int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
++{
++ unsigned long old_cr0 = kvm_read_cr0(vcpu);
++ unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
++ X86_CR0_CD | X86_CR0_NW;
++
++ cr0 |= X86_CR0_ET;
++
++#ifdef CONFIG_X86_64
++ if (cr0 & 0xffffffff00000000UL)
++ return 1;
++#endif
++
++ cr0 &= ~CR0_RESERVED_BITS;
++
++ if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
++ return 1;
++
++ if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
++ return 1;
++
++ if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
++#ifdef CONFIG_X86_64
++ if ((vcpu->arch.efer & EFER_LME)) {
++ int cs_db, cs_l;
++
++ if (!is_pae(vcpu))
++ return 1;
++ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
++ if (cs_l)
++ return 1;
++ } else
++#endif
++ if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
++ kvm_read_cr3(vcpu)))
++ return 1;
++ }
++
++ if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
++ return 1;
++
++ kvm_x86_ops->set_cr0(vcpu, cr0);
++
++ if ((cr0 ^ old_cr0) & X86_CR0_PG) {
++ kvm_clear_async_pf_completion_queue(vcpu);
++ kvm_async_pf_hash_reset(vcpu);
++ }
++
++ if ((cr0 ^ old_cr0) & update_bits)
++ kvm_mmu_reset_context(vcpu);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_cr0);
++
++void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
++{
++ (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
++}
++EXPORT_SYMBOL_GPL(kvm_lmsw);
++
++static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
++{
++ if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
++ !vcpu->guest_xcr0_loaded) {
++ /* kvm_set_xcr() also depends on this */
++ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
++ vcpu->guest_xcr0_loaded = 1;
++ }
++}
++
++static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
++{
++ if (vcpu->guest_xcr0_loaded) {
++ if (vcpu->arch.xcr0 != host_xcr0)
++ xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
++ vcpu->guest_xcr0_loaded = 0;
++ }
++}
++
++int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
++{
++ u64 xcr0 = xcr;
++ u64 old_xcr0 = vcpu->arch.xcr0;
++ u64 valid_bits;
++
++ /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
++ if (index != XCR_XFEATURE_ENABLED_MASK)
++ return 1;
++ if (!(xcr0 & XSTATE_FP))
++ return 1;
++ if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
++ return 1;
++
++ /*
++ * Do not allow the guest to set bits that we do not support
++ * saving. However, xcr0 bit 0 is always set, even if the
++ * emulated CPU does not support XSAVE (see fx_init).
++ */
++ valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
++ if (xcr0 & ~valid_bits)
++ return 1;
++
++ if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
++ return 1;
++
++ kvm_put_guest_xcr0(vcpu);
++ vcpu->arch.xcr0 = xcr0;
++
++ if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
++ kvm_update_cpuid(vcpu);
++ return 0;
++}
++
++int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
++{
++ if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
++ __kvm_set_xcr(vcpu, index, xcr)) {
++ kvm_inject_gp(vcpu, 0);
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_xcr);
++
++int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
++{
++ unsigned long old_cr4 = kvm_read_cr4(vcpu);
++ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
++ X86_CR4_PAE | X86_CR4_SMEP;
++ if (cr4 & CR4_RESERVED_BITS)
++ return 1;
++
++ if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
++ return 1;
++
++ if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
++ return 1;
++
++ if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
++ return 1;
++
++ if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
++ return 1;
++
++ if (is_long_mode(vcpu)) {
++ if (!(cr4 & X86_CR4_PAE))
++ return 1;
++ } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
++ && ((cr4 ^ old_cr4) & pdptr_bits)
++ && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
++ kvm_read_cr3(vcpu)))
++ return 1;
++
++ if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
++ if (!guest_cpuid_has_pcid(vcpu))
++ return 1;
++
++ /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
++ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
++ return 1;
++ }
++
++ if (kvm_x86_ops->set_cr4(vcpu, cr4))
++ return 1;
++
++ if (((cr4 ^ old_cr4) & pdptr_bits) ||
++ (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
++ kvm_mmu_reset_context(vcpu);
++
++ if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
++ update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
++
++ if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
++ kvm_update_cpuid(vcpu);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_cr4);
++
++int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
++{
++ if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
++ kvm_mmu_sync_roots(vcpu);
++ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
++ return 0;
++ }
++
++ if (is_long_mode(vcpu)) {
++ if (cr3 & CR3_L_MODE_RESERVED_BITS)
++ return 1;
++ } else if (is_pae(vcpu) && is_paging(vcpu) &&
++ !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
++ return 1;
++
++ vcpu->arch.cr3 = cr3;
++ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
++ kvm_mmu_new_cr3(vcpu);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_cr3);
++
++int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
++{
++ if (cr8 & CR8_RESERVED_BITS)
++ return 1;
++ if (irqchip_in_kernel(vcpu->kvm))
++ kvm_lapic_set_tpr(vcpu, cr8);
++ else
++ vcpu->arch.cr8 = cr8;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_cr8);
++
++unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
++{
++ if (irqchip_in_kernel(vcpu->kvm))
++ return kvm_lapic_get_cr8(vcpu);
++ else
++ return vcpu->arch.cr8;
++}
++EXPORT_SYMBOL_GPL(kvm_get_cr8);
++
++static void kvm_update_dr6(struct kvm_vcpu *vcpu)
++{
++ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
++ kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
++}
++
++static void kvm_update_dr7(struct kvm_vcpu *vcpu)
++{
++ unsigned long dr7;
++
++ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
++ dr7 = vcpu->arch.guest_debug_dr7;
++ else
++ dr7 = vcpu->arch.dr7;
++ kvm_x86_ops->set_dr7(vcpu, dr7);
++ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
++ if (dr7 & DR7_BP_EN_MASK)
++ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
++}
++
++static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
++{
++ u64 fixed = DR6_FIXED_1;
++
++ if (!guest_cpuid_has_rtm(vcpu))
++ fixed |= DR6_RTM;
++ return fixed;
++}
++
++static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
++{
++ switch (dr) {
++ case 0 ... 3:
++ vcpu->arch.db[dr] = val;
++ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
++ vcpu->arch.eff_db[dr] = val;
++ break;
++ case 4:
++ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
++ return 1; /* #UD */
++ /* fall through */
++ case 6:
++ if (val & 0xffffffff00000000ULL)
++ return -1; /* #GP */
++ vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
++ kvm_update_dr6(vcpu);
++ break;
++ case 5:
++ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
++ return 1; /* #UD */
++ /* fall through */
++ default: /* 7 */
++ if (val & 0xffffffff00000000ULL)
++ return -1; /* #GP */
++ vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
++ kvm_update_dr7(vcpu);
++ break;
++ }
++
++ return 0;
++}
++
++int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
++{
++ int res;
++
++ res = __kvm_set_dr(vcpu, dr, val);
++ if (res > 0)
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ else if (res < 0)
++ kvm_inject_gp(vcpu, 0);
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(kvm_set_dr);
++
++static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
++{
++ switch (dr) {
++ case 0 ... 3:
++ *val = vcpu->arch.db[dr];
++ break;
++ case 4:
++ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
++ return 1;
++ /* fall through */
++ case 6:
++ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
++ *val = vcpu->arch.dr6;
++ else
++ *val = kvm_x86_ops->get_dr6(vcpu);
++ break;
++ case 5:
++ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
++ return 1;
++ /* fall through */
++ default: /* 7 */
++ *val = vcpu->arch.dr7;
++ break;
++ }
++
++ return 0;
++}
++
++int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
++{
++ if (_kvm_get_dr(vcpu, dr, val)) {
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_get_dr);
++
++bool kvm_rdpmc(struct kvm_vcpu *vcpu)
++{
++ u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
++ u64 data;
++ int err;
++
++ err = kvm_pmu_read_pmc(vcpu, ecx, &data);
++ if (err)
++ return err;
++ kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
++ kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
++ return err;
++}
++EXPORT_SYMBOL_GPL(kvm_rdpmc);
++
++/*
++ * List of msr numbers which we expose to userspace through KVM_GET_MSRS
++ * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
++ *
++ * This list is modified at module load time to reflect the
++ * capabilities of the host cpu. This capabilities test skips MSRs that are
++ * kvm-specific. Those are put in the beginning of the list.
++ */
++
++#define KVM_SAVE_MSRS_BEGIN 12
++static u32 msrs_to_save[] = {
++ MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
++ MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
++ HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
++ HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
++ HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
++ MSR_KVM_PV_EOI_EN,
++ MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
++ MSR_STAR,
++#ifdef CONFIG_X86_64
++ MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
++#endif
++ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
++ MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
++};
++
++static unsigned num_msrs_to_save;
++
++static const u32 emulated_msrs[] = {
++ MSR_IA32_TSC_ADJUST,
++ MSR_IA32_TSCDEADLINE,
++ MSR_IA32_MISC_ENABLE,
++ MSR_IA32_MCG_STATUS,
++ MSR_IA32_MCG_CTL,
++};
++
++bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++ if (efer & efer_reserved_bits)
++ return false;
++
++ if (efer & EFER_FFXSR) {
++ struct kvm_cpuid_entry2 *feat;
++
++ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
++ if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
++ return false;
++ }
++
++ if (efer & EFER_SVME) {
++ struct kvm_cpuid_entry2 *feat;
++
++ feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
++ if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
++ return false;
++ }
++
++ return true;
++}
++EXPORT_SYMBOL_GPL(kvm_valid_efer);
++
++static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++ u64 old_efer = vcpu->arch.efer;
++
++ if (!kvm_valid_efer(vcpu, efer))
++ return 1;
++
++ if (is_paging(vcpu)
++ && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
++ return 1;
++
++ efer &= ~EFER_LMA;
++ efer |= vcpu->arch.efer & EFER_LMA;
++
++ kvm_x86_ops->set_efer(vcpu, efer);
++
++ /* Update reserved bits */
++ if ((efer ^ old_efer) & EFER_NX)
++ kvm_mmu_reset_context(vcpu);
++
++ return 0;
++}
++
++void kvm_enable_efer_bits(u64 mask)
++{
++ efer_reserved_bits &= ~mask;
++}
++EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
++
++/*
++ * Writes the msr value into the appropriate "register".
++ * Returns 0 on success, non-0 otherwise.
++ * Assumes vcpu_load() was already called.
++ */
++int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
++{
++ switch (msr->index) {
++ case MSR_FS_BASE:
++ case MSR_GS_BASE:
++ case MSR_KERNEL_GS_BASE:
++ case MSR_CSTAR:
++ case MSR_LSTAR:
++ if (is_noncanonical_address(msr->data))
++ return 1;
++ break;
++ case MSR_IA32_SYSENTER_EIP:
++ case MSR_IA32_SYSENTER_ESP:
++ /*
++		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause a #GP if
++		 * a non-canonical address is written on Intel but not on
++		 * AMD (which ignores the top 32 bits, because it does
++		 * not implement 64-bit SYSENTER).
++ *
++ * 64-bit code should hence be able to write a non-canonical
++ * value on AMD. Making the address canonical ensures that
++ * vmentry does not fail on Intel after writing a non-canonical
++ * value, and that something deterministic happens if the guest
++ * invokes 64-bit SYSENTER.
++ */
++ msr->data = get_canonical(msr->data);
++ }
++ return kvm_x86_ops->set_msr(vcpu, msr);
++}
++EXPORT_SYMBOL_GPL(kvm_set_msr);
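++
++/*
++ * Illustration of the canonicalization referred to above: with 48-bit
++ * virtual addresses, "canonical" means bits 63:47 all equal bit 47.
++ * A minimal sign-extension helper of the kind get_canonical() is
++ * expected to perform (hypothetical sketch; the 48-bit width is an
++ * assumption, not taken from this file):
++ *
++ *	static inline u64 canonical_48(u64 va)
++ *	{
++ *		return (u64)(((s64)va << 16) >> 16);
++ *	}
++ *
++ * e.g. canonical_48(0x0000800000000000) == 0xffff800000000000.
++ */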
++
++/*
++ * Adapt set_msr() to msr_io()'s calling convention
++ */
++static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
++{
++ struct msr_data msr;
++
++ msr.data = *data;
++ msr.index = index;
++ msr.host_initiated = true;
++ return kvm_set_msr(vcpu, &msr);
++}
++
++#ifdef CONFIG_X86_64
++struct pvclock_gtod_data {
++ seqcount_t seq;
++
++ struct { /* extract of a clocksource struct */
++ int vclock_mode;
++ cycle_t cycle_last;
++ cycle_t mask;
++ u32 mult;
++ u32 shift;
++ } clock;
++
++ u64 boot_ns;
++ u64 nsec_base;
++};
++
++static struct pvclock_gtod_data pvclock_gtod_data;
++
++static void update_pvclock_gtod(struct timekeeper *tk)
++{
++ struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
++ u64 boot_ns;
++
++ boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
++
++ write_seqcount_begin(&vdata->seq);
++
++ /* copy pvclock gtod data */
++ vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
++ vdata->clock.cycle_last = tk->tkr.cycle_last;
++ vdata->clock.mask = tk->tkr.mask;
++ vdata->clock.mult = tk->tkr.mult;
++ vdata->clock.shift = tk->tkr.shift;
++
++ vdata->boot_ns = boot_ns;
++ vdata->nsec_base = tk->tkr.xtime_nsec;
++
++ write_seqcount_end(&vdata->seq);
++}
++#endif
++
++
++static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
++{
++ int version;
++ int r;
++ struct pvclock_wall_clock wc;
++ struct timespec boot;
++
++ if (!wall_clock)
++ return;
++
++ r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
++ if (r)
++ return;
++
++ if (version & 1)
++ ++version; /* first time write, random junk */
++
++ ++version;
++
++ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
++
++ /*
++ * The guest calculates current wall clock time by adding
++ * system time (updated by kvm_guest_time_update below) to the
++	 * wall clock specified here. Guest system time equals host
++	 * system time for us, so we must fill in the host boot time here.
++ */
++ getboottime(&boot);
++
++ if (kvm->arch.kvmclock_offset) {
++ struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
++ boot = timespec_sub(boot, ts);
++ }
++ wc.sec = boot.tv_sec;
++ wc.nsec = boot.tv_nsec;
++ wc.version = version;
++
++ kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
++
++ version++;
++ kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
++}
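++
++/*
++ * Guest-side counterpart of the version dance above (a sketch of the
++ * pvclock wall-clock ABI, assuming the documented protocol): an odd
++ * version marks an update in flight, so readers retry until the same
++ * even value is observed before and after copying the fields:
++ *
++ *	do {
++ *		version = wc->version;
++ *		rmb();
++ *		sec  = wc->sec;
++ *		nsec = wc->nsec;
++ *		rmb();
++ *	} while ((version & 1) || version != wc->version);
++ */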
++
++static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
++{
++ uint32_t quotient, remainder;
++
++	/* Don't try to replace this with do_div(); this one calculates
++	 * "(dividend << 32) / divisor" */
++ __asm__ ( "divl %4"
++ : "=a" (quotient), "=d" (remainder)
++ : "0" (0), "1" (dividend), "r" (divisor) );
++ return quotient;
++}
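++
++/*
++ * Worked example for div_frac(): it computes the 32-bit binary
++ * fraction (dividend << 32) / divisor, e.g.
++ *
++ *	div_frac(1, 3) == (1ULL << 32) / 3 == 0x55555555
++ *
++ * i.e. 1/3 in 0.32 fixed point.
++ */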
++
++static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
++ s8 *pshift, u32 *pmultiplier)
++{
++ uint64_t scaled64;
++ int32_t shift = 0;
++ uint64_t tps64;
++ uint32_t tps32;
++
++ tps64 = base_khz * 1000LL;
++ scaled64 = scaled_khz * 1000LL;
++ while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
++ tps64 >>= 1;
++ shift--;
++ }
++
++ tps32 = (uint32_t)tps64;
++ while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
++ if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
++ scaled64 >>= 1;
++ else
++ tps32 <<= 1;
++ shift++;
++ }
++
++ *pshift = shift;
++ *pmultiplier = div_frac(scaled64, tps32);
++
++ pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
++ __func__, base_khz, scaled_khz, shift, *pmultiplier);
++}
++
++static inline u64 get_kernel_ns(void)
++{
++ return ktime_get_boot_ns();
++}
++
++#ifdef CONFIG_X86_64
++static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
++#endif
++
++static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
++unsigned long max_tsc_khz;
++
++static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
++{
++ return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
++ vcpu->arch.virtual_tsc_shift);
++}
++
++static u32 adjust_tsc_khz(u32 khz, s32 ppm)
++{
++ u64 v = (u64)khz * (1000000 + ppm);
++ do_div(v, 1000000);
++ return v;
++}
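++
++/*
++ * Worked example for adjust_tsc_khz(), with the default
++ * tsc_tolerance_ppm of 250 and a 2.5 GHz host TSC:
++ *
++ *	thresh_lo = adjust_tsc_khz(2500000, -250) = 2499375 kHz
++ *	thresh_hi = adjust_tsc_khz(2500000,  250) = 2500625 kHz
++ *
++ * A requested virtual TSC rate inside [thresh_lo, thresh_hi] runs
++ * without scaling; anything outside sets use_scaling below.
++ */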
++
++static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
++{
++ u32 thresh_lo, thresh_hi;
++ int use_scaling = 0;
++
++ /* tsc_khz can be zero if TSC calibration fails */
++ if (this_tsc_khz == 0)
++ return;
++
++ /* Compute a scale to convert nanoseconds in TSC cycles */
++ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
++ &vcpu->arch.virtual_tsc_shift,
++ &vcpu->arch.virtual_tsc_mult);
++ vcpu->arch.virtual_tsc_khz = this_tsc_khz;
++
++ /*
++	 * Compute the acceptable variation in TSC rate and decide
++	 * whether the rate being applied is within those bounds of
++	 * the hardware rate. If so, no scaling or compensation need
++	 * be done.
++ */
++ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
++ thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
++ if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
++ pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
++ use_scaling = 1;
++ }
++ kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
++}
++
++static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
++{
++ u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
++ vcpu->arch.virtual_tsc_mult,
++ vcpu->arch.virtual_tsc_shift);
++ tsc += vcpu->arch.this_tsc_write;
++ return tsc;
++}
++
++void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
++{
++#ifdef CONFIG_X86_64
++ bool vcpus_matched;
++ struct kvm_arch *ka = &vcpu->kvm->arch;
++ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
++
++ vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
++ atomic_read(&vcpu->kvm->online_vcpus));
++
++ /*
++	 * Once the masterclock is enabled, always perform the request in
++	 * order to update it.
++	 *
++	 * In order to enable the masterclock, the host clocksource must be
++	 * TSC and the vcpus need to have matched TSCs. When that happens,
++	 * perform the request to enable the masterclock.
++ */
++ if (ka->use_master_clock ||
++ (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
++ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
++
++ trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
++ atomic_read(&vcpu->kvm->online_vcpus),
++ ka->use_master_clock, gtod->clock.vclock_mode);
++#endif
++}
++
++static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
++{
++ u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
++ vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
++}
++
++void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
++{
++ struct kvm *kvm = vcpu->kvm;
++ u64 offset, ns, elapsed;
++ unsigned long flags;
++ s64 usdiff;
++ bool matched;
++ bool already_matched;
++ u64 data = msr->data;
++
++ raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
++ offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
++ ns = get_kernel_ns();
++ elapsed = ns - kvm->arch.last_tsc_nsec;
++
++ if (vcpu->arch.virtual_tsc_khz) {
++ int faulted = 0;
++
++ /* n.b - signed multiplication and division required */
++ usdiff = data - kvm->arch.last_tsc_write;
++#ifdef CONFIG_X86_64
++ usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
++#else
++ /* do_div() only does unsigned */
++ asm("1: idivl %[divisor]\n"
++ "2: xor %%edx, %%edx\n"
++ " movl $0, %[faulted]\n"
++ "3:\n"
++ ".section .fixup,\"ax\"\n"
++ "4: movl $1, %[faulted]\n"
++ " jmp 3b\n"
++ ".previous\n"
++
++ _ASM_EXTABLE(1b, 4b)
++
++ : "=A"(usdiff), [faulted] "=r" (faulted)
++ : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
++
++#endif
++ do_div(elapsed, 1000);
++ usdiff -= elapsed;
++ if (usdiff < 0)
++ usdiff = -usdiff;
++
++ /* idivl overflow => difference is larger than USEC_PER_SEC */
++ if (faulted)
++ usdiff = USEC_PER_SEC;
++ } else
++ usdiff = USEC_PER_SEC; /* disable TSC match window below */
++
++ /*
++ * Special case: TSC write with a small delta (1 second) of virtual
++ * cycle time against real time is interpreted as an attempt to
++ * synchronize the CPU.
++ *
++ * For a reliable TSC, we can match TSC offsets, and for an unstable
++ * TSC, we add elapsed time in this computation. We could let the
++ * compensation code attempt to catch up if we fall behind, but
++ * it's better to try to match offsets from the beginning.
++ */
++ if (usdiff < USEC_PER_SEC &&
++ vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
++ if (!check_tsc_unstable()) {
++ offset = kvm->arch.cur_tsc_offset;
++ pr_debug("kvm: matched tsc offset for %llu\n", data);
++ } else {
++ u64 delta = nsec_to_cycles(vcpu, elapsed);
++ data += delta;
++ offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
++ pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
++ }
++ matched = true;
++ already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
++ } else {
++ /*
++ * We split periods of matched TSC writes into generations.
++ * For each generation, we track the original measured
++ * nanosecond time, offset, and write, so if TSCs are in
++ * sync, we can match exact offset, and if not, we can match
++ * exact software computation in compute_guest_tsc()
++ *
++ * These values are tracked in kvm->arch.cur_xxx variables.
++ */
++ kvm->arch.cur_tsc_generation++;
++ kvm->arch.cur_tsc_nsec = ns;
++ kvm->arch.cur_tsc_write = data;
++ kvm->arch.cur_tsc_offset = offset;
++ matched = false;
++ pr_debug("kvm: new tsc generation %llu, clock %llu\n",
++ kvm->arch.cur_tsc_generation, data);
++ }
++
++ /*
++	 * We also track the most recent recorded KHZ, write and time to
++ * allow the matching interval to be extended at each write.
++ */
++ kvm->arch.last_tsc_nsec = ns;
++ kvm->arch.last_tsc_write = data;
++ kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
++
++ vcpu->arch.last_guest_tsc = data;
++
++ /* Keep track of which generation this VCPU has synchronized to */
++ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
++ vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
++ vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
++
++ if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
++ update_ia32_tsc_adjust_msr(vcpu, offset);
++ kvm_x86_ops->write_tsc_offset(vcpu, offset);
++ raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
++
++ spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
++ if (!matched) {
++ kvm->arch.nr_vcpus_matched_tsc = 0;
++ } else if (!already_matched) {
++ kvm->arch.nr_vcpus_matched_tsc++;
++ }
++
++ kvm_track_tsc_matching(vcpu);
++ spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
++}
++
++EXPORT_SYMBOL_GPL(kvm_write_tsc);
++
++#ifdef CONFIG_X86_64
++
++static cycle_t read_tsc(void)
++{
++ cycle_t ret;
++ u64 last;
++
++ /*
++ * Empirically, a fence (of type that depends on the CPU)
++ * before rdtsc is enough to ensure that rdtsc is ordered
++ * with respect to loads. The various CPU manuals are unclear
++ * as to whether rdtsc can be reordered with later loads,
++ * but no one has ever seen it happen.
++ */
++ rdtsc_barrier();
++ ret = (cycle_t)vget_cycles();
++
++ last = pvclock_gtod_data.clock.cycle_last;
++
++ if (likely(ret >= last))
++ return ret;
++
++ /*
++ * GCC likes to generate cmov here, but this branch is extremely
++	 * predictable (it's just a function of time and the likely is
++ * very likely) and there's a data dependence, so force GCC
++ * to generate a branch instead. I don't barrier() because
++ * we don't actually need a barrier, and if this function
++ * ever gets inlined it will generate worse code.
++ */
++ asm volatile ("");
++ return last;
++}
++
++static inline u64 vgettsc(cycle_t *cycle_now)
++{
++ long v;
++ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
++
++ *cycle_now = read_tsc();
++
++ v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
++ return v * gtod->clock.mult;
++}
++
++static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
++{
++ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
++ unsigned long seq;
++ int mode;
++ u64 ns;
++
++ do {
++ seq = read_seqcount_begin(&gtod->seq);
++ mode = gtod->clock.vclock_mode;
++ ns = gtod->nsec_base;
++ ns += vgettsc(cycle_now);
++ ns >>= gtod->clock.shift;
++ ns += gtod->boot_ns;
++ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
++ *t = ns;
++
++ return mode;
++}
++
++/* returns true if host is using tsc clocksource */
++static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
++{
++ /* checked again under seqlock below */
++ if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
++ return false;
++
++ return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
++}
++#endif
++
++/*
++ *
++ * Assuming a stable TSC across physical CPUs, and a stable TSC
++ * across virtual CPUs, the following condition is possible.
++ * Each numbered line represents an event visible to both
++ * CPUs at the next numbered event.
++ *
++ * "timespecX" represents host monotonic time. "tscX" represents
++ * RDTSC value.
++ *
++ * VCPU0 on CPU0 | VCPU1 on CPU1
++ *
++ * 1. read timespec0,tsc0
++ * 2. | timespec1 = timespec0 + N
++ * | tsc1 = tsc0 + M
++ * 3. transition to guest | transition to guest
++ * 4. ret0 = timespec0 + (rdtsc - tsc0) |
++ * 5. | ret1 = timespec1 + (rdtsc - tsc1)
++ * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
++ *
++ * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
++ *
++ * - ret0 < ret1
++ * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
++ * ...
++ * - 0 < N - M => M < N
++ *
++ * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
++ * always the case (the difference between two distinct xtime instances
++ * might be smaller than the difference between corresponding TSC reads,
++ * when updating guest vcpus' pvclock areas).
++ *
++ * To avoid that problem, do not allow visibility of distinct
++ * system_timestamp/tsc_timestamp values simultaneously: use a master
++ * copy of host monotonic time values. Update that master copy
++ * in lockstep.
++ *
++ * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
++ *
++ */
++
++static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
++{
++#ifdef CONFIG_X86_64
++ struct kvm_arch *ka = &kvm->arch;
++ int vclock_mode;
++ bool host_tsc_clocksource, vcpus_matched;
++
++ vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
++ atomic_read(&kvm->online_vcpus));
++
++ /*
++ * If the host uses TSC clock, then passthrough TSC as stable
++ * to the guest.
++ */
++ host_tsc_clocksource = kvm_get_time_and_clockread(
++ &ka->master_kernel_ns,
++ &ka->master_cycle_now);
++
++ ka->use_master_clock = host_tsc_clocksource && vcpus_matched
++ && !backwards_tsc_observed;
++
++ if (ka->use_master_clock)
++ atomic_set(&kvm_guest_has_master_clock, 1);
++
++ vclock_mode = pvclock_gtod_data.clock.vclock_mode;
++ trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
++ vcpus_matched);
++#endif
++}
++
++static void kvm_gen_update_masterclock(struct kvm *kvm)
++{
++#ifdef CONFIG_X86_64
++ int i;
++ struct kvm_vcpu *vcpu;
++ struct kvm_arch *ka = &kvm->arch;
++
++ spin_lock(&ka->pvclock_gtod_sync_lock);
++ kvm_make_mclock_inprogress_request(kvm);
++ /* no guest entries from this point */
++ pvclock_update_vm_gtod_copy(kvm);
++
++ kvm_for_each_vcpu(i, vcpu, kvm)
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++
++ /* guest entries allowed */
++ kvm_for_each_vcpu(i, vcpu, kvm)
++ clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
++
++ spin_unlock(&ka->pvclock_gtod_sync_lock);
++#endif
++}
++
++static int kvm_guest_time_update(struct kvm_vcpu *v)
++{
++ unsigned long flags, this_tsc_khz;
++ struct kvm_vcpu_arch *vcpu = &v->arch;
++ struct kvm_arch *ka = &v->kvm->arch;
++ s64 kernel_ns;
++ u64 tsc_timestamp, host_tsc;
++ struct pvclock_vcpu_time_info guest_hv_clock;
++ u8 pvclock_flags;
++ bool use_master_clock;
++
++ kernel_ns = 0;
++ host_tsc = 0;
++
++ /*
++ * If the host uses TSC clock, then passthrough TSC as stable
++ * to the guest.
++ */
++ spin_lock(&ka->pvclock_gtod_sync_lock);
++ use_master_clock = ka->use_master_clock;
++ if (use_master_clock) {
++ host_tsc = ka->master_cycle_now;
++ kernel_ns = ka->master_kernel_ns;
++ }
++ spin_unlock(&ka->pvclock_gtod_sync_lock);
++
++ /* Keep irq disabled to prevent changes to the clock */
++ local_irq_save(flags);
++ this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
++ if (unlikely(this_tsc_khz == 0)) {
++ local_irq_restore(flags);
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
++ return 1;
++ }
++ if (!use_master_clock) {
++ host_tsc = native_read_tsc();
++ kernel_ns = get_kernel_ns();
++ }
++
++ tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
++
++ /*
++ * We may have to catch up the TSC to match elapsed wall clock
++ * time for two reasons, even if kvmclock is used.
++ * 1) CPU could have been running below the maximum TSC rate
++ * 2) Broken TSC compensation resets the base at each VCPU
++ * entry to avoid unknown leaps of TSC even when running
++ * again on the same CPU. This may cause apparent elapsed
++ * time to disappear, and the guest to stand still or run
++ * very slowly.
++ */
++ if (vcpu->tsc_catchup) {
++ u64 tsc = compute_guest_tsc(v, kernel_ns);
++ if (tsc > tsc_timestamp) {
++ adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
++ tsc_timestamp = tsc;
++ }
++ }
++
++ local_irq_restore(flags);
++
++ if (!vcpu->pv_time_enabled)
++ return 0;
++
++ if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
++ kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
++ &vcpu->hv_clock.tsc_shift,
++ &vcpu->hv_clock.tsc_to_system_mul);
++ vcpu->hw_tsc_khz = this_tsc_khz;
++ }
++
++ /* With all the info we got, fill in the values */
++ vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
++ vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
++ vcpu->last_guest_tsc = tsc_timestamp;
++
++ /*
++ * The interface expects us to write an even number signaling that the
++ * update is finished. Since the guest won't see the intermediate
++ * state, we just increase by 2 at the end.
++ */
++ vcpu->hv_clock.version += 2;
++
++ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
++ &guest_hv_clock, sizeof(guest_hv_clock))))
++ return 0;
++
++ /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
++ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
++
++ if (vcpu->pvclock_set_guest_stopped_request) {
++ pvclock_flags |= PVCLOCK_GUEST_STOPPED;
++ vcpu->pvclock_set_guest_stopped_request = false;
++ }
++
++ /* If the host uses TSC clocksource, then it is stable */
++ if (use_master_clock)
++ pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
++
++ vcpu->hv_clock.flags = pvclock_flags;
++
++ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
++ &vcpu->hv_clock,
++ sizeof(vcpu->hv_clock));
++ return 0;
++}
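++
++/*
++ * For reference, the guest-side read of the structure filled in above
++ * (a sketch of the pvclock ABI, assuming the documented algorithm):
++ * readers spin on an odd version, then scale the TSC delta by
++ * tsc_to_system_mul (a 32.32 fixed-point factor) and tsc_shift:
++ *
++ *	do {
++ *		version = hv->version;
++ *		rmb();
++ *		delta = rdtsc() - hv->tsc_timestamp;
++ *		if (hv->tsc_shift < 0)
++ *			delta >>= -hv->tsc_shift;
++ *		else
++ *			delta <<= hv->tsc_shift;
++ *		ns = hv->system_time +
++ *		     ((delta * hv->tsc_to_system_mul) >> 32);
++ *		rmb();
++ *	} while ((version & 1) || version != hv->version);
++ */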
++
++/*
++ * kvmclock updates which are isolated to a given vcpu, such as
++ * vcpu->cpu migration, should not allow system_timestamp from
++ * the rest of the vcpus to remain static. Otherwise ntp frequency
++ * correction applies to one vcpu's system_timestamp but not
++ * the others.
++ *
++ * So in those cases, request a kvmclock update for all vcpus.
++ * We need to rate-limit these requests though, as they can
++ * considerably slow guests that have a large number of vcpus.
++ * The time for a remote vcpu to update its kvmclock is bound
++ * by the delay we use to rate-limit the updates.
++ */
++
++#define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
++
++static void kvmclock_update_fn(struct work_struct *work)
++{
++ int i;
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
++ kvmclock_update_work);
++ struct kvm *kvm = container_of(ka, struct kvm, arch);
++ struct kvm_vcpu *vcpu;
++
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++ kvm_vcpu_kick(vcpu);
++ }
++}
++
++static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
++{
++ struct kvm *kvm = v->kvm;
++
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
++ schedule_delayed_work(&kvm->arch.kvmclock_update_work,
++ KVMCLOCK_UPDATE_DELAY);
++}
++
++#define KVMCLOCK_SYNC_PERIOD (300 * HZ)
++
++static void kvmclock_sync_fn(struct work_struct *work)
++{
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
++ kvmclock_sync_work);
++ struct kvm *kvm = container_of(ka, struct kvm, arch);
++
++ schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
++ schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
++ KVMCLOCK_SYNC_PERIOD);
++}
++
++static bool msr_mtrr_valid(unsigned msr)
++{
++ switch (msr) {
++ case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
++ case MSR_MTRRfix64K_00000:
++ case MSR_MTRRfix16K_80000:
++ case MSR_MTRRfix16K_A0000:
++ case MSR_MTRRfix4K_C0000:
++ case MSR_MTRRfix4K_C8000:
++ case MSR_MTRRfix4K_D0000:
++ case MSR_MTRRfix4K_D8000:
++ case MSR_MTRRfix4K_E0000:
++ case MSR_MTRRfix4K_E8000:
++ case MSR_MTRRfix4K_F0000:
++ case MSR_MTRRfix4K_F8000:
++ case MSR_MTRRdefType:
++ case MSR_IA32_CR_PAT:
++ return true;
++ case 0x2f8:
++ return true;
++ }
++ return false;
++}
++
++static bool valid_pat_type(unsigned t)
++{
++ return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
++}
++
++static bool valid_mtrr_type(unsigned t)
++{
++ return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
++}
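++
++/*
++ * Editor's worked example, not part of this patch: both helpers above
++ * use a constant bitmask as a set of allowed memory types, so the test
++ * reads "t is in range and bit t of the mask is set". For
++ * valid_mtrr_type(), mask 0x73 = 0b01110011 = {0, 1, 4, 5, 6}:
++ *
++ *	t = 3 (reserved):   (1 << 3) & 0x73 = 0x08 & 0x73 = 0    -> false
++ *	t = 6 (write-back): (1 << 6) & 0x73 = 0x40 & 0x73 = 0x40 -> true
++ */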
++
++bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++ int i;
++ u64 mask;
++
++ if (!msr_mtrr_valid(msr))
++ return false;
++
++ if (msr == MSR_IA32_CR_PAT) {
++ for (i = 0; i < 8; i++)
++ if (!valid_pat_type((data >> (i * 8)) & 0xff))
++ return false;
++ return true;
++ } else if (msr == MSR_MTRRdefType) {
++ if (data & ~0xcff)
++ return false;
++ return valid_mtrr_type(data & 0xff);
++ } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
++ for (i = 0; i < 8 ; i++)
++ if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
++ return false;
++ return true;
++ }
++
++ /* variable MTRRs */
++ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
++
++ mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
++ if ((msr & 1) == 0) {
++ /* MTRR base */
++ if (!valid_mtrr_type(data & 0xff))
++ return false;
++ mask |= 0xf00;
++ } else
++ /* MTRR mask */
++ mask |= 0x7ff;
++ if (data & mask) {
++ kvm_inject_gp(vcpu, 0);
++ return false;
++ }
++
++ return true;
++}
++EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
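++
++/*
++ * Editor's worked example, not part of this patch: for a guest with
++ * cpuid_maxphyaddr() == 36, the reserved mask above starts as
++ * ~0ULL << 36 (bits 63:36). A variable-MTRR base MSR (even index)
++ * additionally reserves bits 11:8 (mask |= 0xf00); a mask MSR (odd
++ * index) reserves bits 10:0 (mask |= 0x7ff), leaving bit 11, the
++ * valid bit, writable. Any write touching a reserved bit takes the
++ * kvm_inject_gp() path.
++ */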
++
++static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++ u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
++
++ if (!kvm_mtrr_valid(vcpu, msr, data))
++ return 1;
++
++ if (msr == MSR_MTRRdefType) {
++ vcpu->arch.mtrr_state.def_type = data;
++ vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
++ } else if (msr == MSR_MTRRfix64K_00000)
++ p[0] = data;
++ else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
++ p[1 + msr - MSR_MTRRfix16K_80000] = data;
++ else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
++ p[3 + msr - MSR_MTRRfix4K_C0000] = data;
++ else if (msr == MSR_IA32_CR_PAT)
++ vcpu->arch.pat = data;
++ else { /* Variable MTRRs */
++ int idx, is_mtrr_mask;
++ u64 *pt;
++
++ idx = (msr - 0x200) / 2;
++ is_mtrr_mask = msr - 0x200 - 2 * idx;
++ if (!is_mtrr_mask)
++ pt =
++ (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
++ else
++ pt =
++ (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
++ *pt = data;
++ }
++
++ kvm_mmu_reset_context(vcpu);
++ return 0;
++}
++
++static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++ u64 mcg_cap = vcpu->arch.mcg_cap;
++ unsigned bank_num = mcg_cap & 0xff;
++
++ switch (msr) {
++ case MSR_IA32_MCG_STATUS:
++ vcpu->arch.mcg_status = data;
++ break;
++ case MSR_IA32_MCG_CTL:
++ if (!(mcg_cap & MCG_CTL_P))
++ return 1;
++ if (data != 0 && data != ~(u64)0)
++ return -1;
++ vcpu->arch.mcg_ctl = data;
++ break;
++ default:
++ if (msr >= MSR_IA32_MC0_CTL &&
++ msr < MSR_IA32_MCx_CTL(bank_num)) {
++ u32 offset = msr - MSR_IA32_MC0_CTL;
++ /* Only 0 or all 1s can be written to IA32_MCi_CTL.
++ * Some Linux kernels, though, clear bit 10 in bank 4 to
++ * work around a BIOS/GART TBL issue on AMD K8s; ignore
++ * this to avoid an uncaught #GP in the guest.
++ */
++ if ((offset & 0x3) == 0 &&
++ data != 0 && (data | (1 << 10)) != ~(u64)0)
++ return -1;
++ vcpu->arch.mce_banks[offset] = data;
++ break;
++ }
++ return 1;
++ }
++ return 0;
++}
++
++static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
++{
++ struct kvm *kvm = vcpu->kvm;
++ int lm = is_long_mode(vcpu);
++ u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
++ : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
++ u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
++ : kvm->arch.xen_hvm_config.blob_size_32;
++ u32 page_num = data & ~PAGE_MASK;
++ u64 page_addr = data & PAGE_MASK;
++ u8 *page;
++ int r;
++
++ r = -E2BIG;
++ if (page_num >= blob_size)
++ goto out;
++ r = -ENOMEM;
++ page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
++ if (IS_ERR(page)) {
++ r = PTR_ERR(page);
++ goto out;
++ }
++ if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
++ goto out_free;
++ r = 0;
++out_free:
++ kfree(page);
++out:
++ return r;
++}
++
++static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
++{
++ return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
++}
++
++static bool kvm_hv_msr_partition_wide(u32 msr)
++{
++ bool r = false;
++ switch (msr) {
++ case HV_X64_MSR_GUEST_OS_ID:
++ case HV_X64_MSR_HYPERCALL:
++ case HV_X64_MSR_REFERENCE_TSC:
++ case HV_X64_MSR_TIME_REF_COUNT:
++ r = true;
++ break;
++ }
++
++ return r;
++}
++
++static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++ struct kvm *kvm = vcpu->kvm;
++
++ switch (msr) {
++ case HV_X64_MSR_GUEST_OS_ID:
++ kvm->arch.hv_guest_os_id = data;
++ /* setting guest os id to zero disables hypercall page */
++ if (!kvm->arch.hv_guest_os_id)
++ kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
++ break;
++ case HV_X64_MSR_HYPERCALL: {
++ u64 gfn;
++ unsigned long addr;
++ u8 instructions[4];
++
++ /* if guest os id is not set hypercall should remain disabled */
++ if (!kvm->arch.hv_guest_os_id)
++ break;
++ if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
++ kvm->arch.hv_hypercall = data;
++ break;
++ }
++ gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
++ addr = gfn_to_hva(kvm, gfn);
++ if (kvm_is_error_hva(addr))
++ return 1;
++ kvm_x86_ops->patch_hypercall(vcpu, instructions);
++ ((unsigned char *)instructions)[3] = 0xc3; /* ret */
++ if (__copy_to_user((void __user *)addr, instructions, 4))
++ return 1;
++ kvm->arch.hv_hypercall = data;
++ mark_page_dirty(kvm, gfn);
++ break;
++ }
++ case HV_X64_MSR_REFERENCE_TSC: {
++ u64 gfn;
++ HV_REFERENCE_TSC_PAGE tsc_ref;
++ memset(&tsc_ref, 0, sizeof(tsc_ref));
++ kvm->arch.hv_tsc_page = data;
++ if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
++ break;
++ gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
++ if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
++ &tsc_ref, sizeof(tsc_ref)))
++ return 1;
++ mark_page_dirty(kvm, gfn);
++ break;
++ }
++ default:
++ vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
++ "data 0x%llx\n", msr, data);
++ return 1;
++ }
++ return 0;
++}
++
++static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
++{
++ switch (msr) {
++ case HV_X64_MSR_APIC_ASSIST_PAGE: {
++ u64 gfn;
++ unsigned long addr;
++
++ if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
++ vcpu->arch.hv_vapic = data;
++ if (kvm_lapic_enable_pv_eoi(vcpu, 0))
++ return 1;
++ break;
++ }
++ gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
++ addr = gfn_to_hva(vcpu->kvm, gfn);
++ if (kvm_is_error_hva(addr))
++ return 1;
++ if (__clear_user((void __user *)addr, PAGE_SIZE))
++ return 1;
++ vcpu->arch.hv_vapic = data;
++ mark_page_dirty(vcpu->kvm, gfn);
++ if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
++ return 1;
++ break;
++ }
++ case HV_X64_MSR_EOI:
++ return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
++ case HV_X64_MSR_ICR:
++ return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
++ case HV_X64_MSR_TPR:
++ return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
++ default:
++ vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
++ "data 0x%llx\n", msr, data);
++ return 1;
++ }
++
++ return 0;
++}
++
++static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
++{
++ gpa_t gpa = data & ~0x3f;
++
++ /* Bits 2:5 are reserved; they should be zero */
++ if (data & 0x3c)
++ return 1;
++
++ vcpu->arch.apf.msr_val = data;
++
++ if (!(data & KVM_ASYNC_PF_ENABLED)) {
++ kvm_clear_async_pf_completion_queue(vcpu);
++ kvm_async_pf_hash_reset(vcpu);
++ return 0;
++ }
++
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
++ sizeof(u32)))
++ return 1;
++
++ vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
++ kvm_async_pf_wakeup_all(vcpu);
++ return 0;
++}
++
++static void kvmclock_reset(struct kvm_vcpu *vcpu)
++{
++ vcpu->arch.pv_time_enabled = false;
++}
++
++static void accumulate_steal_time(struct kvm_vcpu *vcpu)
++{
++ u64 delta;
++
++ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
++ return;
++
++ delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
++ vcpu->arch.st.last_steal = current->sched_info.run_delay;
++ vcpu->arch.st.accum_steal = delta;
++}
++
++static void record_steal_time(struct kvm_vcpu *vcpu)
++{
++ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
++ return;
++
++ if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
++ &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
++ return;
++
++ vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
++ vcpu->arch.st.steal.version += 2;
++ vcpu->arch.st.accum_steal = 0;
++
++ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
++ &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
++}
++
++int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
++{
++ bool pr = false;
++ u32 msr = msr_info->index;
++ u64 data = msr_info->data;
++
++ switch (msr) {
++ case MSR_AMD64_NB_CFG:
++ case MSR_IA32_UCODE_REV:
++ case MSR_IA32_UCODE_WRITE:
++ case MSR_VM_HSAVE_PA:
++ case MSR_AMD64_PATCH_LOADER:
++ case MSR_AMD64_BU_CFG2:
++ break;
++
++ case MSR_EFER:
++ return set_efer(vcpu, data);
++ case MSR_K7_HWCR:
++ data &= ~(u64)0x40; /* ignore flush filter disable */
++ data &= ~(u64)0x100; /* ignore ignne emulation enable */
++ data &= ~(u64)0x8; /* ignore TLB cache disable */
++ data &= ~(u64)0x40000; /* ignore Mc status write enable */
++ if (data != 0) {
++ vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
++ data);
++ return 1;
++ }
++ break;
++ case MSR_FAM10H_MMIO_CONF_BASE:
++ if (data != 0) {
++ vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
++ "0x%llx\n", data);
++ return 1;
++ }
++ break;
++ case MSR_IA32_DEBUGCTLMSR:
++ if (!data) {
++ /* We support the non-activated case already */
++ break;
++ } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
++ /* Values other than LBR and BTF are vendor-specific,
++ thus reserved and should throw a #GP */
++ return 1;
++ }
++ vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
++ __func__, data);
++ break;
++ case 0x200 ... 0x2ff:
++ return set_msr_mtrr(vcpu, msr, data);
++ case MSR_IA32_APICBASE:
++ return kvm_set_apic_base(vcpu, msr_info);
++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
++ return kvm_x2apic_msr_write(vcpu, msr, data);
++ case MSR_IA32_TSCDEADLINE:
++ kvm_set_lapic_tscdeadline_msr(vcpu, data);
++ break;
++ case MSR_IA32_TSC_ADJUST:
++ if (guest_cpuid_has_tsc_adjust(vcpu)) {
++ if (!msr_info->host_initiated) {
++ u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
++ kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
++ }
++ vcpu->arch.ia32_tsc_adjust_msr = data;
++ }
++ break;
++ case MSR_IA32_MISC_ENABLE:
++ vcpu->arch.ia32_misc_enable_msr = data;
++ break;
++ case MSR_KVM_WALL_CLOCK_NEW:
++ case MSR_KVM_WALL_CLOCK:
++ vcpu->kvm->arch.wall_clock = data;
++ kvm_write_wall_clock(vcpu->kvm, data);
++ break;
++ case MSR_KVM_SYSTEM_TIME_NEW:
++ case MSR_KVM_SYSTEM_TIME: {
++ u64 gpa_offset;
++ kvmclock_reset(vcpu);
++
++ vcpu->arch.time = data;
++ kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
++
++ /* only proceed if the enable bit is set... */
++ if (!(data & 1))
++ break;
++
++ gpa_offset = data & ~(PAGE_MASK | 1);
++
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
++ &vcpu->arch.pv_time, data & ~1ULL,
++ sizeof(struct pvclock_vcpu_time_info)))
++ vcpu->arch.pv_time_enabled = false;
++ else
++ vcpu->arch.pv_time_enabled = true;
++
++ break;
++ }
++ case MSR_KVM_ASYNC_PF_EN:
++ if (kvm_pv_enable_async_pf(vcpu, data))
++ return 1;
++ break;
++ case MSR_KVM_STEAL_TIME:
++
++ if (unlikely(!sched_info_on()))
++ return 1;
++
++ if (data & KVM_STEAL_RESERVED_MASK)
++ return 1;
++
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
++ data & KVM_STEAL_VALID_BITS,
++ sizeof(struct kvm_steal_time)))
++ return 1;
++
++ vcpu->arch.st.msr_val = data;
++
++ if (!(data & KVM_MSR_ENABLED))
++ break;
++
++ vcpu->arch.st.last_steal = current->sched_info.run_delay;
++
++ preempt_disable();
++ accumulate_steal_time(vcpu);
++ preempt_enable();
++
++ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++
++ break;
++ case MSR_KVM_PV_EOI_EN:
++ if (kvm_lapic_enable_pv_eoi(vcpu, data))
++ return 1;
++ break;
++
++ case MSR_IA32_MCG_CTL:
++ case MSR_IA32_MCG_STATUS:
++ case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
++ return set_msr_mce(vcpu, msr, data);
++
++ /* Performance counters are not protected by a CPUID bit,
++ * so we should check all of them in the generic path for the sake of
++ * cross vendor migration.
++ * Writing a zero into the event select MSRs disables them,
++ * which we perfectly emulate ;-). Any other value should be at least
++ * reported, some guests depend on them.
++ */
++ case MSR_K7_EVNTSEL0:
++ case MSR_K7_EVNTSEL1:
++ case MSR_K7_EVNTSEL2:
++ case MSR_K7_EVNTSEL3:
++ if (data != 0)
++ vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
++ "0x%x data 0x%llx\n", msr, data);
++ break;
++ /* at least RHEL 4 unconditionally writes to the perfctr registers,
++ * so we ignore writes to make it happy.
++ */
++ case MSR_K7_PERFCTR0:
++ case MSR_K7_PERFCTR1:
++ case MSR_K7_PERFCTR2:
++ case MSR_K7_PERFCTR3:
++ vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
++ "0x%x data 0x%llx\n", msr, data);
++ break;
++ case MSR_P6_PERFCTR0:
++ case MSR_P6_PERFCTR1:
++ pr = true;
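++ /* fall through: PERFCTR and EVNTSEL writes share the PMU path below */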
++ case MSR_P6_EVNTSEL0:
++ case MSR_P6_EVNTSEL1:
++ if (kvm_pmu_msr(vcpu, msr))
++ return kvm_pmu_set_msr(vcpu, msr_info);
++
++ if (pr || data != 0)
++ vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
++ "0x%x data 0x%llx\n", msr, data);
++ break;
++ case MSR_K7_CLK_CTL:
++ /*
++ * Ignore all writes to this no-longer-documented MSR.
++ * Writes only matter on old K7 processors, all of which
++ * pre-date SVM, but AMD recommended the write as a
++ * workaround for those chips. Since guests can be told
++ * on the command line that they run on an affected model,
++ * they may still attempt the workaround, so ignore it.
++ */
++ break;
++ case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
++ if (kvm_hv_msr_partition_wide(msr)) {
++ int r;
++ mutex_lock(&vcpu->kvm->lock);
++ r = set_msr_hyperv_pw(vcpu, msr, data);
++ mutex_unlock(&vcpu->kvm->lock);
++ return r;
++ } else
++ return set_msr_hyperv(vcpu, msr, data);
++ break;
++ case MSR_IA32_BBL_CR_CTL3:
++ /* Drop writes to this legacy MSR -- see rdmsr
++ * counterpart for further detail.
++ */
++ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
++ break;
++ case MSR_AMD64_OSVW_ID_LENGTH:
++ if (!guest_cpuid_has_osvw(vcpu))
++ return 1;
++ vcpu->arch.osvw.length = data;
++ break;
++ case MSR_AMD64_OSVW_STATUS:
++ if (!guest_cpuid_has_osvw(vcpu))
++ return 1;
++ vcpu->arch.osvw.status = data;
++ break;
++ default:
++ if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
++ return xen_hvm_config(vcpu, data);
++ if (kvm_pmu_msr(vcpu, msr))
++ return kvm_pmu_set_msr(vcpu, msr_info);
++ if (!ignore_msrs) {
++ vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
++ msr, data);
++ return 1;
++ } else {
++ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
++ msr, data);
++ break;
++ }
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_set_msr_common);
++
++/*
++ * Reads an msr value (of 'msr_index') into 'pdata'.
++ * Returns 0 on success, non-0 otherwise.
++ * Assumes vcpu_load() was already called.
++ */
++int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
++{
++ return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
++}
++
++static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++{
++ u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
++
++ if (!msr_mtrr_valid(msr))
++ return 1;
++
++ if (msr == MSR_MTRRdefType)
++ *pdata = vcpu->arch.mtrr_state.def_type +
++ (vcpu->arch.mtrr_state.enabled << 10);
++ else if (msr == MSR_MTRRfix64K_00000)
++ *pdata = p[0];
++ else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
++ *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
++ else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
++ *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
++ else if (msr == MSR_IA32_CR_PAT)
++ *pdata = vcpu->arch.pat;
++ else { /* Variable MTRRs */
++ int idx, is_mtrr_mask;
++ u64 *pt;
++
++ idx = (msr - 0x200) / 2;
++ is_mtrr_mask = msr - 0x200 - 2 * idx;
++ if (!is_mtrr_mask)
++ pt =
++ (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
++ else
++ pt =
++ (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
++ *pdata = *pt;
++ }
++
++ return 0;
++}
++
++static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++{
++ u64 data;
++ u64 mcg_cap = vcpu->arch.mcg_cap;
++ unsigned bank_num = mcg_cap & 0xff;
++
++ switch (msr) {
++ case MSR_IA32_P5_MC_ADDR:
++ case MSR_IA32_P5_MC_TYPE:
++ data = 0;
++ break;
++ case MSR_IA32_MCG_CAP:
++ data = vcpu->arch.mcg_cap;
++ break;
++ case MSR_IA32_MCG_CTL:
++ if (!(mcg_cap & MCG_CTL_P))
++ return 1;
++ data = vcpu->arch.mcg_ctl;
++ break;
++ case MSR_IA32_MCG_STATUS:
++ data = vcpu->arch.mcg_status;
++ break;
++ default:
++ if (msr >= MSR_IA32_MC0_CTL &&
++ msr < MSR_IA32_MCx_CTL(bank_num)) {
++ u32 offset = msr - MSR_IA32_MC0_CTL;
++ data = vcpu->arch.mce_banks[offset];
++ break;
++ }
++ return 1;
++ }
++ *pdata = data;
++ return 0;
++}
++
++static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++{
++ u64 data = 0;
++ struct kvm *kvm = vcpu->kvm;
++
++ switch (msr) {
++ case HV_X64_MSR_GUEST_OS_ID:
++ data = kvm->arch.hv_guest_os_id;
++ break;
++ case HV_X64_MSR_HYPERCALL:
++ data = kvm->arch.hv_hypercall;
++ break;
++ case HV_X64_MSR_TIME_REF_COUNT: {
++ data =
++ div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
++ break;
++ }
++ case HV_X64_MSR_REFERENCE_TSC:
++ data = kvm->arch.hv_tsc_page;
++ break;
++ default:
++ vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
++ return 1;
++ }
++
++ *pdata = data;
++ return 0;
++}
++
++static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++{
++ u64 data = 0;
++
++ switch (msr) {
++ case HV_X64_MSR_VP_INDEX: {
++ int r;
++ struct kvm_vcpu *v;
++ kvm_for_each_vcpu(r, v, vcpu->kvm) {
++ if (v == vcpu) {
++ data = r;
++ break;
++ }
++ }
++ break;
++ }
++ case HV_X64_MSR_EOI:
++ return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
++ case HV_X64_MSR_ICR:
++ return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
++ case HV_X64_MSR_TPR:
++ return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
++ case HV_X64_MSR_APIC_ASSIST_PAGE:
++ data = vcpu->arch.hv_vapic;
++ break;
++ default:
++ vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
++ return 1;
++ }
++ *pdata = data;
++ return 0;
++}
++
++int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++{
++ u64 data;
++
++ switch (msr) {
++ case MSR_IA32_PLATFORM_ID:
++ case MSR_IA32_EBL_CR_POWERON:
++ case MSR_IA32_DEBUGCTLMSR:
++ case MSR_IA32_LASTBRANCHFROMIP:
++ case MSR_IA32_LASTBRANCHTOIP:
++ case MSR_IA32_LASTINTFROMIP:
++ case MSR_IA32_LASTINTTOIP:
++ case MSR_K8_SYSCFG:
++ case MSR_K7_HWCR:
++ case MSR_VM_HSAVE_PA:
++ case MSR_K7_EVNTSEL0:
++ case MSR_K7_EVNTSEL1:
++ case MSR_K7_EVNTSEL2:
++ case MSR_K7_EVNTSEL3:
++ case MSR_K7_PERFCTR0:
++ case MSR_K7_PERFCTR1:
++ case MSR_K7_PERFCTR2:
++ case MSR_K7_PERFCTR3:
++ case MSR_K8_INT_PENDING_MSG:
++ case MSR_AMD64_NB_CFG:
++ case MSR_FAM10H_MMIO_CONF_BASE:
++ case MSR_AMD64_BU_CFG2:
++ data = 0;
++ break;
++ case MSR_P6_PERFCTR0:
++ case MSR_P6_PERFCTR1:
++ case MSR_P6_EVNTSEL0:
++ case MSR_P6_EVNTSEL1:
++ if (kvm_pmu_msr(vcpu, msr))
++ return kvm_pmu_get_msr(vcpu, msr, pdata);
++ data = 0;
++ break;
++ case MSR_IA32_UCODE_REV:
++ data = 0x100000000ULL;
++ break;
++ case MSR_MTRRcap:
++ data = 0x500 | KVM_NR_VAR_MTRR;
++ break;
++ case 0x200 ... 0x2ff:
++ return get_msr_mtrr(vcpu, msr, pdata);
++ case 0xcd: /* fsb frequency */
++ data = 3;
++ break;
++ /*
++ * MSR_EBC_FREQUENCY_ID
++ * Conservative value valid even for the most basic CPU models.
++ * Models 0 and 1: 000 in bits 23:21 indicates a bus speed of
++ * 100MHz; model 2: 000 in bits 18:16 indicates 100MHz; models
++ * 3 and 4: 266MHz. Set the Core Clock Frequency to System Bus
++ * Frequency Ratio (bits 31:24) to 1 even though it is only
++ * valid for CPU models > 2; otherwise guests may end up
++ * dividing or multiplying by zero.
++ */
++ case MSR_EBC_FREQUENCY_ID:
++ data = 1 << 24;
++ break;
++ case MSR_IA32_APICBASE:
++ data = kvm_get_apic_base(vcpu);
++ break;
++ case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
++ return kvm_x2apic_msr_read(vcpu, msr, pdata);
++ case MSR_IA32_TSCDEADLINE:
++ data = kvm_get_lapic_tscdeadline_msr(vcpu);
++ break;
++ case MSR_IA32_TSC_ADJUST:
++ data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
++ break;
++ case MSR_IA32_MISC_ENABLE:
++ data = vcpu->arch.ia32_misc_enable_msr;
++ break;
++ case MSR_IA32_PERF_STATUS:
++ /* TSC increment by tick */
++ data = 1000ULL;
++ /* CPU multiplier */
++ data |= (((uint64_t)4ULL) << 40);
++ break;
++ case MSR_EFER:
++ data = vcpu->arch.efer;
++ break;
++ case MSR_KVM_WALL_CLOCK:
++ case MSR_KVM_WALL_CLOCK_NEW:
++ data = vcpu->kvm->arch.wall_clock;
++ break;
++ case MSR_KVM_SYSTEM_TIME:
++ case MSR_KVM_SYSTEM_TIME_NEW:
++ data = vcpu->arch.time;
++ break;
++ case MSR_KVM_ASYNC_PF_EN:
++ data = vcpu->arch.apf.msr_val;
++ break;
++ case MSR_KVM_STEAL_TIME:
++ data = vcpu->arch.st.msr_val;
++ break;
++ case MSR_KVM_PV_EOI_EN:
++ data = vcpu->arch.pv_eoi.msr_val;
++ break;
++ case MSR_IA32_P5_MC_ADDR:
++ case MSR_IA32_P5_MC_TYPE:
++ case MSR_IA32_MCG_CAP:
++ case MSR_IA32_MCG_CTL:
++ case MSR_IA32_MCG_STATUS:
++ case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
++ return get_msr_mce(vcpu, msr, pdata);
++ case MSR_K7_CLK_CTL:
++ /*
++ * Provide the expected ramp-up count for K7. All other
++ * bits are set to zero, indicating minimum divisors for
++ * every field.
++ *
++ * This prevents guest kernels on AMD host with CPU
++ * type 6, model 8 and higher from exploding due to
++ * the rdmsr failing.
++ */
++ data = 0x20000000;
++ break;
++ case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
++ if (kvm_hv_msr_partition_wide(msr)) {
++ int r;
++ mutex_lock(&vcpu->kvm->lock);
++ r = get_msr_hyperv_pw(vcpu, msr, pdata);
++ mutex_unlock(&vcpu->kvm->lock);
++ return r;
++ } else
++ return get_msr_hyperv(vcpu, msr, pdata);
++ break;
++ case MSR_IA32_BBL_CR_CTL3:
++ /* This legacy MSR exists but isn't fully documented in current
++ * silicon. It is, however, accessed by Windows XP in very narrow
++ * scenarios where it sets bit #19, itself documented as
++ * a "reserved" bit. Make a best-effort attempt to return coherent
++ * read data here in case the guest interprets the rest of the
++ * register:
++ *
++ * L2 cache control register 3: 64GB range, 256KB size,
++ * enabled, latency 0x1, configured
++ */
++ data = 0xbe702111;
++ break;
++ case MSR_AMD64_OSVW_ID_LENGTH:
++ if (!guest_cpuid_has_osvw(vcpu))
++ return 1;
++ data = vcpu->arch.osvw.length;
++ break;
++ case MSR_AMD64_OSVW_STATUS:
++ if (!guest_cpuid_has_osvw(vcpu))
++ return 1;
++ data = vcpu->arch.osvw.status;
++ break;
++ default:
++ if (kvm_pmu_msr(vcpu, msr))
++ return kvm_pmu_get_msr(vcpu, msr, pdata);
++ if (!ignore_msrs) {
++ vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
++ return 1;
++ } else {
++ vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
++ data = 0;
++ }
++ break;
++ }
++ *pdata = data;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_get_msr_common);
++
++/*
++ * Read or write a bunch of msrs. All parameters are kernel addresses.
++ *
++ * @return number of msrs processed successfully.
++ */
++static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
++ struct kvm_msr_entry *entries,
++ int (*do_msr)(struct kvm_vcpu *vcpu,
++ unsigned index, u64 *data))
++{
++ int i, idx;
++
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
++ for (i = 0; i < msrs->nmsrs; ++i)
++ if (do_msr(vcpu, entries[i].index, &entries[i].data))
++ break;
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
++
++ return i;
++}
++
++/*
++ * Read or write a bunch of msrs. Parameters are user addresses.
++ *
++ * @return number of msrs processed successfully.
++ */
++static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
++ int (*do_msr)(struct kvm_vcpu *vcpu,
++ unsigned index, u64 *data),
++ int writeback)
++{
++ struct kvm_msrs msrs;
++ struct kvm_msr_entry *entries;
++ int r, n;
++ unsigned size;
++
++ r = -EFAULT;
++ if (copy_from_user(&msrs, user_msrs, sizeof msrs))
++ goto out;
++
++ r = -E2BIG;
++ if (msrs.nmsrs >= MAX_IO_MSRS)
++ goto out;
++
++ size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
++ entries = memdup_user(user_msrs->entries, size);
++ if (IS_ERR(entries)) {
++ r = PTR_ERR(entries);
++ goto out;
++ }
++
++ r = n = __msr_io(vcpu, &msrs, entries, do_msr);
++ if (r < 0)
++ goto out_free;
++
++ r = -EFAULT;
++ if (writeback && copy_to_user(user_msrs->entries, entries, size))
++ goto out_free;
++
++ r = n;
++
++out_free:
++ kfree(entries);
++out:
++ return r;
++}
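++
++/*
++ * Editor's sketch, not part of this patch: how userspace reaches the
++ * msr_io() path above through the KVM_GET_MSRS vcpu ioctl. vcpu_fd is
++ * assumed to come from KVM_CREATE_VCPU; compiled out via #if 0.
++ */
++#if 0
++#include <stdio.h>
++#include <sys/ioctl.h>
++#include <linux/kvm.h>
++
++static int read_tsc_msr(int vcpu_fd)
++{
++	struct {
++		struct kvm_msrs hdr;
++		struct kvm_msr_entry entry;
++	} req = {
++		.hdr.nmsrs   = 1,
++		.entry.index = 0x10,	/* MSR_IA32_TSC */
++	};
++
++	/* the return value is the number of MSRs processed, 1 on success */
++	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
++		return -1;
++	printf("guest TSC = %llu\n", (unsigned long long)req.entry.data);
++	return 0;
++}
++#endif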
++
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
++{
++ int r;
++
++ switch (ext) {
++ case KVM_CAP_IRQCHIP:
++ case KVM_CAP_HLT:
++ case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
++ case KVM_CAP_SET_TSS_ADDR:
++ case KVM_CAP_EXT_CPUID:
++ case KVM_CAP_EXT_EMUL_CPUID:
++ case KVM_CAP_CLOCKSOURCE:
++ case KVM_CAP_PIT:
++ case KVM_CAP_NOP_IO_DELAY:
++ case KVM_CAP_MP_STATE:
++ case KVM_CAP_SYNC_MMU:
++ case KVM_CAP_USER_NMI:
++ case KVM_CAP_REINJECT_CONTROL:
++ case KVM_CAP_IRQ_INJECT_STATUS:
++ case KVM_CAP_IRQFD:
++ case KVM_CAP_IOEVENTFD:
++ case KVM_CAP_IOEVENTFD_NO_LENGTH:
++ case KVM_CAP_PIT2:
++ case KVM_CAP_PIT_STATE2:
++ case KVM_CAP_SET_IDENTITY_MAP_ADDR:
++ case KVM_CAP_XEN_HVM:
++ case KVM_CAP_ADJUST_CLOCK:
++ case KVM_CAP_VCPU_EVENTS:
++ case KVM_CAP_HYPERV:
++ case KVM_CAP_HYPERV_VAPIC:
++ case KVM_CAP_HYPERV_SPIN:
++ case KVM_CAP_PCI_SEGMENT:
++ case KVM_CAP_DEBUGREGS:
++ case KVM_CAP_X86_ROBUST_SINGLESTEP:
++ case KVM_CAP_XSAVE:
++ case KVM_CAP_ASYNC_PF:
++ case KVM_CAP_GET_TSC_KHZ:
++ case KVM_CAP_KVMCLOCK_CTRL:
++ case KVM_CAP_READONLY_MEM:
++ case KVM_CAP_HYPERV_TIME:
++ case KVM_CAP_IOAPIC_POLARITY_IGNORED:
++#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
++ case KVM_CAP_ASSIGN_DEV_IRQ:
++ case KVM_CAP_PCI_2_3:
++#endif
++ r = 1;
++ break;
++ case KVM_CAP_COALESCED_MMIO:
++ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
++ break;
++ case KVM_CAP_VAPIC:
++ r = !kvm_x86_ops->cpu_has_accelerated_tpr();
++ break;
++ case KVM_CAP_NR_VCPUS:
++ r = KVM_SOFT_MAX_VCPUS;
++ break;
++ case KVM_CAP_MAX_VCPUS:
++ r = KVM_MAX_VCPUS;
++ break;
++ case KVM_CAP_NR_MEMSLOTS:
++ r = KVM_USER_MEM_SLOTS;
++ break;
++ case KVM_CAP_PV_MMU: /* obsolete */
++ r = 0;
++ break;
++#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
++ case KVM_CAP_IOMMU:
++ r = iommu_present(&pci_bus_type);
++ break;
++#endif
++ case KVM_CAP_MCE:
++ r = KVM_MAX_MCE_BANKS;
++ break;
++ case KVM_CAP_XCRS:
++ r = cpu_has_xsave;
++ break;
++ case KVM_CAP_TSC_CONTROL:
++ r = kvm_has_tsc_control;
++ break;
++ case KVM_CAP_TSC_DEADLINE_TIMER:
++ r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
++ break;
++ default:
++ r = 0;
++ break;
++ }
++ return r;
++
++}
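++
++/*
++ * Editor's sketch, not part of this patch: probing the capability
++ * table above from userspace with the KVM_CHECK_EXTENSION ioctl on
++ * the /dev/kvm fd; compiled out via #if 0.
++ */
++#if 0
++#include <fcntl.h>
++#include <unistd.h>
++#include <sys/ioctl.h>
++#include <linux/kvm.h>
++
++static int recommended_max_vcpus(void)
++{
++	int kvm_fd = open("/dev/kvm", O_RDWR);
++	int n;
++
++	if (kvm_fd < 0)
++		return -1;
++	/* lands in the KVM_CAP_NR_VCPUS case above (KVM_SOFT_MAX_VCPUS) */
++	n = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
++	close(kvm_fd);
++	return n;
++}
++#endif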
++
++long kvm_arch_dev_ioctl(struct file *filp,
++ unsigned int ioctl, unsigned long arg)
++{
++ void __user *argp = (void __user *)arg;
++ long r;
++
++ switch (ioctl) {
++ case KVM_GET_MSR_INDEX_LIST: {
++ struct kvm_msr_list __user *user_msr_list = argp;
++ struct kvm_msr_list msr_list;
++ unsigned n;
++
++ r = -EFAULT;
++ if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
++ goto out;
++ n = msr_list.nmsrs;
++ msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
++ if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
++ goto out;
++ r = -E2BIG;
++ if (n < msr_list.nmsrs)
++ goto out;
++ r = -EFAULT;
++ if (copy_to_user(user_msr_list->indices, &msrs_to_save,
++ num_msrs_to_save * sizeof(u32)))
++ goto out;
++ if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
++ &emulated_msrs,
++ ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_GET_SUPPORTED_CPUID:
++ case KVM_GET_EMULATED_CPUID: {
++ struct kvm_cpuid2 __user *cpuid_arg = argp;
++ struct kvm_cpuid2 cpuid;
++
++ r = -EFAULT;
++ if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++ goto out;
++
++ r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
++ ioctl);
++ if (r)
++ goto out;
++
++ r = -EFAULT;
++ if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_X86_GET_MCE_CAP_SUPPORTED: {
++ u64 mce_cap;
++
++ mce_cap = KVM_MCE_CAP_SUPPORTED;
++ r = -EFAULT;
++ if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
++ goto out;
++ r = 0;
++ break;
++ }
++ default:
++ r = -EINVAL;
++ }
++out:
++ return r;
++}
++
++static void wbinvd_ipi(void *garbage)
++{
++ wbinvd();
++}
++
++static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_has_noncoherent_dma(vcpu->kvm);
++}
++
++void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
++{
++ /* The guest may execute WBINVD; handle that here */
++ if (need_emulate_wbinvd(vcpu)) {
++ if (kvm_x86_ops->has_wbinvd_exit())
++ cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
++ else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
++ smp_call_function_single(vcpu->cpu,
++ wbinvd_ipi, NULL, 1);
++ }
++
++ kvm_x86_ops->vcpu_load(vcpu, cpu);
++
++ /* Apply any externally detected TSC adjustments (due to suspend) */
++ if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
++ adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
++ vcpu->arch.tsc_offset_adjustment = 0;
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++ }
++
++ if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
++ s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
++ native_read_tsc() - vcpu->arch.last_host_tsc;
++ if (tsc_delta < 0)
++ mark_tsc_unstable("KVM discovered backwards TSC");
++ if (check_tsc_unstable()) {
++ u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
++ vcpu->arch.last_guest_tsc);
++ kvm_x86_ops->write_tsc_offset(vcpu, offset);
++ vcpu->arch.tsc_catchup = 1;
++ }
++ /*
++ * On a host with synchronized TSC, there is no need to update
++ * kvmclock on vcpu->cpu migration
++ */
++ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
++ kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
++ if (vcpu->cpu != cpu)
++ kvm_migrate_timers(vcpu);
++ vcpu->cpu = cpu;
++ }
++
++ accumulate_steal_time(vcpu);
++ kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
++}
++
++void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
++{
++ kvm_x86_ops->vcpu_put(vcpu);
++ kvm_put_guest_fpu(vcpu);
++ vcpu->arch.last_host_tsc = native_read_tsc();
++}
++
++static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
++ struct kvm_lapic_state *s)
++{
++ kvm_x86_ops->sync_pir_to_irr(vcpu);
++ memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
++
++ return 0;
++}
++
++static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
++ struct kvm_lapic_state *s)
++{
++ kvm_apic_post_state_restore(vcpu, s);
++ update_cr8_intercept(vcpu);
++
++ return 0;
++}
++
++static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
++ struct kvm_interrupt *irq)
++{
++ if (irq->irq >= KVM_NR_INTERRUPTS)
++ return -EINVAL;
++ if (irqchip_in_kernel(vcpu->kvm))
++ return -ENXIO;
++
++ kvm_queue_interrupt(vcpu, irq->irq, false);
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++
++ return 0;
++}
++
++static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
++{
++ kvm_inject_nmi(vcpu);
++
++ return 0;
++}
++
++static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
++ struct kvm_tpr_access_ctl *tac)
++{
++ if (tac->flags)
++ return -EINVAL;
++ vcpu->arch.tpr_access_reporting = !!tac->enabled;
++ return 0;
++}
++
++static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
++ u64 mcg_cap)
++{
++ int r;
++ unsigned bank_num = mcg_cap & 0xff, bank;
++
++ r = -EINVAL;
++ if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
++ goto out;
++ if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
++ goto out;
++ r = 0;
++ vcpu->arch.mcg_cap = mcg_cap;
++ /* Init IA32_MCG_CTL to all 1s */
++ if (mcg_cap & MCG_CTL_P)
++ vcpu->arch.mcg_ctl = ~(u64)0;
++ /* Init IA32_MCi_CTL to all 1s */
++ for (bank = 0; bank < bank_num; bank++)
++ vcpu->arch.mce_banks[bank*4] = ~(u64)0;
++out:
++ return r;
++}
++
++static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
++ struct kvm_x86_mce *mce)
++{
++ u64 mcg_cap = vcpu->arch.mcg_cap;
++ unsigned bank_num = mcg_cap & 0xff;
++ u64 *banks = vcpu->arch.mce_banks;
++
++ if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
++ return -EINVAL;
++ /*
++ * if IA32_MCG_CTL is not all 1s, the uncorrected error
++ * reporting is disabled
++ */
++ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
++ vcpu->arch.mcg_ctl != ~(u64)0)
++ return 0;
++ banks += 4 * mce->bank;
++ /*
++ * if IA32_MCi_CTL is not all 1s, the uncorrected error
++ * reporting is disabled for the bank
++ */
++ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
++ return 0;
++ if (mce->status & MCI_STATUS_UC) {
++ if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
++ !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
++ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
++ return 0;
++ }
++ if (banks[1] & MCI_STATUS_VAL)
++ mce->status |= MCI_STATUS_OVER;
++ banks[2] = mce->addr;
++ banks[3] = mce->misc;
++ vcpu->arch.mcg_status = mce->mcg_status;
++ banks[1] = mce->status;
++ kvm_queue_exception(vcpu, MC_VECTOR);
++ } else if (!(banks[1] & MCI_STATUS_VAL)
++ || !(banks[1] & MCI_STATUS_UC)) {
++ if (banks[1] & MCI_STATUS_VAL)
++ mce->status |= MCI_STATUS_OVER;
++ banks[2] = mce->addr;
++ banks[3] = mce->misc;
++ banks[1] = mce->status;
++ } else
++ banks[1] |= MCI_STATUS_OVER;
++ return 0;
++}
++
++static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
++ struct kvm_vcpu_events *events)
++{
++ process_nmi(vcpu);
++ events->exception.injected =
++ vcpu->arch.exception.pending &&
++ !kvm_exception_is_soft(vcpu->arch.exception.nr);
++ events->exception.nr = vcpu->arch.exception.nr;
++ events->exception.has_error_code = vcpu->arch.exception.has_error_code;
++ events->exception.pad = 0;
++ events->exception.error_code = vcpu->arch.exception.error_code;
++
++ events->interrupt.injected =
++ vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
++ events->interrupt.nr = vcpu->arch.interrupt.nr;
++ events->interrupt.soft = 0;
++ events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
++
++ events->nmi.injected = vcpu->arch.nmi_injected;
++ events->nmi.pending = vcpu->arch.nmi_pending != 0;
++ events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
++ events->nmi.pad = 0;
++
++ events->sipi_vector = 0; /* never valid when reporting to user space */
++
++ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
++ | KVM_VCPUEVENT_VALID_SHADOW);
++ memset(&events->reserved, 0, sizeof(events->reserved));
++}
++
++static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
++ struct kvm_vcpu_events *events)
++{
++ if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
++ | KVM_VCPUEVENT_VALID_SIPI_VECTOR
++ | KVM_VCPUEVENT_VALID_SHADOW))
++ return -EINVAL;
++
++ process_nmi(vcpu);
++ vcpu->arch.exception.pending = events->exception.injected;
++ vcpu->arch.exception.nr = events->exception.nr;
++ vcpu->arch.exception.has_error_code = events->exception.has_error_code;
++ vcpu->arch.exception.error_code = events->exception.error_code;
++
++ vcpu->arch.interrupt.pending = events->interrupt.injected;
++ vcpu->arch.interrupt.nr = events->interrupt.nr;
++ vcpu->arch.interrupt.soft = events->interrupt.soft;
++ if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
++ kvm_x86_ops->set_interrupt_shadow(vcpu,
++ events->interrupt.shadow);
++
++ vcpu->arch.nmi_injected = events->nmi.injected;
++ if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
++ vcpu->arch.nmi_pending = events->nmi.pending;
++ kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
++
++ if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
++ kvm_vcpu_has_lapic(vcpu))
++ vcpu->arch.apic->sipi_vector = events->sipi_vector;
++
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++
++ return 0;
++}
++
++static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
++ struct kvm_debugregs *dbgregs)
++{
++ unsigned long val;
++
++ memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
++ _kvm_get_dr(vcpu, 6, &val);
++ dbgregs->dr6 = val;
++ dbgregs->dr7 = vcpu->arch.dr7;
++ dbgregs->flags = 0;
++ memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
++}
++
++static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
++ struct kvm_debugregs *dbgregs)
++{
++ if (dbgregs->flags)
++ return -EINVAL;
++
++ memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
++ vcpu->arch.dr6 = dbgregs->dr6;
++ kvm_update_dr6(vcpu);
++ vcpu->arch.dr7 = dbgregs->dr7;
++ kvm_update_dr7(vcpu);
++
++ return 0;
++}
++
++#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
++
++static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
++{
++ struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
++ u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
++ u64 valid;
++
++ /*
++ * Copy legacy XSAVE area, to avoid complications with CPUID
++ * leaves 0 and 1 in the loop below.
++ */
++ memcpy(dest, xsave, XSAVE_HDR_OFFSET);
++
++ /* Set XSTATE_BV */
++ *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
++
++ /*
++ * Copy each region from the possibly compacted offset to the
++ * non-compacted offset.
++ */
++ valid = xstate_bv & ~XSTATE_FPSSE;
++ while (valid) {
++ u64 feature = valid & -valid;
++ int index = fls64(feature) - 1;
++ void *src = get_xsave_addr(xsave, feature);
++
++ if (src) {
++ u32 size, offset, ecx, edx;
++ cpuid_count(XSTATE_CPUID, index,
++ &size, &offset, &ecx, &edx);
++ memcpy(dest + offset, src, size);
++ }
++
++ valid -= feature;
++ }
++}
++
++static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
++{
++ struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
++ u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
++ u64 valid;
++
++ /*
++ * Copy legacy XSAVE area, to avoid complications with CPUID
++ * leaves 0 and 1 in the loop below.
++ */
++ memcpy(xsave, src, XSAVE_HDR_OFFSET);
++
++ /* Set XSTATE_BV and possibly XCOMP_BV. */
++ xsave->xsave_hdr.xstate_bv = xstate_bv;
++ if (cpu_has_xsaves)
++ xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
++
++ /*
++ * Copy each region from the non-compacted offset to the
++ * possibly compacted offset.
++ */
++ valid = xstate_bv & ~XSTATE_FPSSE;
++ while (valid) {
++ u64 feature = valid & -valid;
++ int index = fls64(feature) - 1;
++ void *dest = get_xsave_addr(xsave, feature);
++
++ if (dest) {
++ u32 size, offset, ecx, edx;
++ cpuid_count(XSTATE_CPUID, index,
++ &size, &offset, &ecx, &edx);
++ memcpy(dest, src + offset, size);
++ } else
++ WARN_ON_ONCE(1);
++
++ valid -= feature;
++ }
++}
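++
++/*
++ * Editor's worked example, not part of this patch: the two loops above
++ * walk xstate_bv with the lowest-set-bit idiom - "v & -v" isolates the
++ * lowest set bit, fls64() names it, "v -= bit" clears it. For
++ * xstate_bv = 0x7 (FP | SSE | YMM) and XSTATE_FPSSE = 0x3:
++ *
++ *	valid = 0x7 & ~0x3 = 0x4
++ *	pass 1: feature = 0x4 & -0x4 = 0x4, index = fls64(0x4) - 1 = 2
++ *	valid -= 0x4 -> 0, loop ends after the single YMM region
++ */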
++
++static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
++ struct kvm_xsave *guest_xsave)
++{
++ if (cpu_has_xsave) {
++ memset(guest_xsave, 0, sizeof(struct kvm_xsave));
++ fill_xsave((u8 *) guest_xsave->region, vcpu);
++ } else {
++ memcpy(guest_xsave->region,
++ &vcpu->arch.guest_fpu.state->fxsave,
++ sizeof(struct i387_fxsave_struct));
++ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
++ XSTATE_FPSSE;
++ }
++}
++
++static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
++ struct kvm_xsave *guest_xsave)
++{
++ u64 xstate_bv =
++ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
++
++ if (cpu_has_xsave) {
++ /*
++ * Here we allow setting states that are not present in
++ * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
++ * with old userspace.
++ */
++ if (xstate_bv & ~kvm_supported_xcr0())
++ return -EINVAL;
++ load_xsave(vcpu, (u8 *)guest_xsave->region);
++ } else {
++ if (xstate_bv & ~XSTATE_FPSSE)
++ return -EINVAL;
++ memcpy(&vcpu->arch.guest_fpu.state->fxsave,
++ guest_xsave->region, sizeof(struct i387_fxsave_struct));
++ }
++ return 0;
++}
++
++static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
++ struct kvm_xcrs *guest_xcrs)
++{
++ if (!cpu_has_xsave) {
++ guest_xcrs->nr_xcrs = 0;
++ return;
++ }
++
++ guest_xcrs->nr_xcrs = 1;
++ guest_xcrs->flags = 0;
++ guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
++ guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
++}
++
++static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
++ struct kvm_xcrs *guest_xcrs)
++{
++ int i, r = 0;
++
++ if (!cpu_has_xsave)
++ return -EINVAL;
++
++ if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
++ return -EINVAL;
++
++ for (i = 0; i < guest_xcrs->nr_xcrs; i++)
++ /* Only support XCR0 currently */
++ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
++ r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
++ guest_xcrs->xcrs[i].value);
++ break;
++ }
++ if (r)
++ r = -EINVAL;
++ return r;
++}
++
++/*
++ * kvm_set_guest_paused() indicates to the guest kernel that it has been
++ * stopped by the hypervisor. This function will be called from the host only.
++ * EINVAL is returned when the host attempts to set the flag for a guest that
++ * does not support pv clocks.
++ */
++static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
++{
++ if (!vcpu->arch.pv_time_enabled)
++ return -EINVAL;
++ vcpu->arch.pvclock_set_guest_stopped_request = true;
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++ return 0;
++}
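++
++/*
++ * Editor's sketch, not part of this patch: the pause notification above
++ * is reached from userspace via the KVM_KVMCLOCK_CTRL vcpu ioctl,
++ * typically after the VMM stops and resumes a guest; compiled out.
++ */
++#if 0
++#include <sys/ioctl.h>
++#include <linux/kvm.h>
++
++/* tell the guest that the stop was host-induced (soft lockup watchdog) */
++static int mark_vcpu_paused(int vcpu_fd)
++{
++	/* fails with EINVAL when the guest has no pvclock, as noted above */
++	return ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
++}
++#endif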
++
++long kvm_arch_vcpu_ioctl(struct file *filp,
++ unsigned int ioctl, unsigned long arg)
++{
++ struct kvm_vcpu *vcpu = filp->private_data;
++ void __user *argp = (void __user *)arg;
++ int r;
++ union {
++ struct kvm_lapic_state *lapic;
++ struct kvm_xsave *xsave;
++ struct kvm_xcrs *xcrs;
++ void *buffer;
++ } u;
++
++ u.buffer = NULL;
++ switch (ioctl) {
++ case KVM_GET_LAPIC: {
++ r = -EINVAL;
++ if (!vcpu->arch.apic)
++ goto out;
++ u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
++
++ r = -ENOMEM;
++ if (!u.lapic)
++ goto out;
++ r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
++ if (r)
++ goto out;
++ r = -EFAULT;
++ if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_SET_LAPIC: {
++ r = -EINVAL;
++ if (!vcpu->arch.apic)
++ goto out;
++ u.lapic = memdup_user(argp, sizeof(*u.lapic));
++ if (IS_ERR(u.lapic))
++ return PTR_ERR(u.lapic);
++
++ r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
++ break;
++ }
++ case KVM_INTERRUPT: {
++ struct kvm_interrupt irq;
++
++ r = -EFAULT;
++ if (copy_from_user(&irq, argp, sizeof irq))
++ goto out;
++ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
++ break;
++ }
++ case KVM_NMI: {
++ r = kvm_vcpu_ioctl_nmi(vcpu);
++ break;
++ }
++ case KVM_SET_CPUID: {
++ struct kvm_cpuid __user *cpuid_arg = argp;
++ struct kvm_cpuid cpuid;
++
++ r = -EFAULT;
++ if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++ goto out;
++ r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
++ break;
++ }
++ case KVM_SET_CPUID2: {
++ struct kvm_cpuid2 __user *cpuid_arg = argp;
++ struct kvm_cpuid2 cpuid;
++
++ r = -EFAULT;
++ if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++ goto out;
++ r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
++ cpuid_arg->entries);
++ break;
++ }
++ case KVM_GET_CPUID2: {
++ struct kvm_cpuid2 __user *cpuid_arg = argp;
++ struct kvm_cpuid2 cpuid;
++
++ r = -EFAULT;
++ if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
++ goto out;
++ r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
++ cpuid_arg->entries);
++ if (r)
++ goto out;
++ r = -EFAULT;
++ if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_GET_MSRS:
++ r = msr_io(vcpu, argp, kvm_get_msr, 1);
++ break;
++ case KVM_SET_MSRS:
++ r = msr_io(vcpu, argp, do_set_msr, 0);
++ break;
++ case KVM_TPR_ACCESS_REPORTING: {
++ struct kvm_tpr_access_ctl tac;
++
++ r = -EFAULT;
++ if (copy_from_user(&tac, argp, sizeof tac))
++ goto out;
++ r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
++ if (r)
++ goto out;
++ r = -EFAULT;
++ if (copy_to_user(argp, &tac, sizeof tac))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_SET_VAPIC_ADDR: {
++ struct kvm_vapic_addr va;
++
++ r = -EINVAL;
++ if (!irqchip_in_kernel(vcpu->kvm))
++ goto out;
++ r = -EFAULT;
++ if (copy_from_user(&va, argp, sizeof va))
++ goto out;
++ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ break;
++ }
++ case KVM_X86_SETUP_MCE: {
++ u64 mcg_cap;
++
++ r = -EFAULT;
++ if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
++ goto out;
++ r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
++ break;
++ }
++ case KVM_X86_SET_MCE: {
++ struct kvm_x86_mce mce;
++
++ r = -EFAULT;
++ if (copy_from_user(&mce, argp, sizeof mce))
++ goto out;
++ r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
++ break;
++ }
++ case KVM_GET_VCPU_EVENTS: {
++ struct kvm_vcpu_events events;
++
++ kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
++
++ r = -EFAULT;
++ if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
++ break;
++ r = 0;
++ break;
++ }
++ case KVM_SET_VCPU_EVENTS: {
++ struct kvm_vcpu_events events;
++
++ r = -EFAULT;
++ if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
++ break;
++
++ r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
++ break;
++ }
++ case KVM_GET_DEBUGREGS: {
++ struct kvm_debugregs dbgregs;
++
++ kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
++
++ r = -EFAULT;
++ if (copy_to_user(argp, &dbgregs,
++ sizeof(struct kvm_debugregs)))
++ break;
++ r = 0;
++ break;
++ }
++ case KVM_SET_DEBUGREGS: {
++ struct kvm_debugregs dbgregs;
++
++ r = -EFAULT;
++ if (copy_from_user(&dbgregs, argp,
++ sizeof(struct kvm_debugregs)))
++ break;
++
++ r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
++ break;
++ }
++ case KVM_GET_XSAVE: {
++ u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
++ r = -ENOMEM;
++ if (!u.xsave)
++ break;
++
++ kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
++
++ r = -EFAULT;
++ if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
++ break;
++ r = 0;
++ break;
++ }
++ case KVM_SET_XSAVE: {
++ u.xsave = memdup_user(argp, sizeof(*u.xsave));
++ if (IS_ERR(u.xsave))
++ return PTR_ERR(u.xsave);
++
++ r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
++ break;
++ }
++ case KVM_GET_XCRS: {
++ u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
++ r = -ENOMEM;
++ if (!u.xcrs)
++ break;
++
++ kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
++
++ r = -EFAULT;
++ if (copy_to_user(argp, u.xcrs,
++ sizeof(struct kvm_xcrs)))
++ break;
++ r = 0;
++ break;
++ }
++ case KVM_SET_XCRS: {
++ u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
++ if (IS_ERR(u.xcrs))
++ return PTR_ERR(u.xcrs);
++
++ r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
++ break;
++ }
++ case KVM_SET_TSC_KHZ: {
++ u32 user_tsc_khz;
++
++ r = -EINVAL;
++ user_tsc_khz = (u32)arg;
++
++ if (user_tsc_khz >= kvm_max_guest_tsc_khz)
++ goto out;
++
++ if (user_tsc_khz == 0)
++ user_tsc_khz = tsc_khz;
++
++ kvm_set_tsc_khz(vcpu, user_tsc_khz);
++
++ r = 0;
++ goto out;
++ }
++ case KVM_GET_TSC_KHZ: {
++ r = vcpu->arch.virtual_tsc_khz;
++ goto out;
++ }
++ case KVM_KVMCLOCK_CTRL: {
++ r = kvm_set_guest_paused(vcpu);
++ goto out;
++ }
++ default:
++ r = -EINVAL;
++ }
++out:
++ kfree(u.buffer);
++ return r;
++}
++
++int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
++{
++ return VM_FAULT_SIGBUS;
++}
++
++static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
++{
++ int ret;
++
++ if (addr > (unsigned int)(-3 * PAGE_SIZE))
++ return -EINVAL;
++ ret = kvm_x86_ops->set_tss_addr(kvm, addr);
++ return ret;
++}
++
++static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
++ u64 ident_addr)
++{
++ kvm->arch.ept_identity_map_addr = ident_addr;
++ return 0;
++}
++
++static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
++ u32 kvm_nr_mmu_pages)
++{
++ if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
++ return -EINVAL;
++
++ mutex_lock(&kvm->slots_lock);
++
++ kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
++ kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
++
++ mutex_unlock(&kvm->slots_lock);
++ return 0;
++}
++
++static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
++{
++ return kvm->arch.n_max_mmu_pages;
++}
++
++static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
++{
++ int r;
++
++ r = 0;
++ switch (chip->chip_id) {
++ case KVM_IRQCHIP_PIC_MASTER:
++ memcpy(&chip->chip.pic,
++ &pic_irqchip(kvm)->pics[0],
++ sizeof(struct kvm_pic_state));
++ break;
++ case KVM_IRQCHIP_PIC_SLAVE:
++ memcpy(&chip->chip.pic,
++ &pic_irqchip(kvm)->pics[1],
++ sizeof(struct kvm_pic_state));
++ break;
++ case KVM_IRQCHIP_IOAPIC:
++ r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
++ break;
++ default:
++ r = -EINVAL;
++ break;
++ }
++ return r;
++}
++
++static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
++{
++ int r;
++
++ r = 0;
++ switch (chip->chip_id) {
++ case KVM_IRQCHIP_PIC_MASTER:
++ spin_lock(&pic_irqchip(kvm)->lock);
++ memcpy(&pic_irqchip(kvm)->pics[0],
++ &chip->chip.pic,
++ sizeof(struct kvm_pic_state));
++ spin_unlock(&pic_irqchip(kvm)->lock);
++ break;
++ case KVM_IRQCHIP_PIC_SLAVE:
++ spin_lock(&pic_irqchip(kvm)->lock);
++ memcpy(&pic_irqchip(kvm)->pics[1],
++ &chip->chip.pic,
++ sizeof(struct kvm_pic_state));
++ spin_unlock(&pic_irqchip(kvm)->lock);
++ break;
++ case KVM_IRQCHIP_IOAPIC:
++ r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
++ break;
++ default:
++ r = -EINVAL;
++ break;
++ }
++ kvm_pic_update_irq(pic_irqchip(kvm));
++ return r;
++}
++
++static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
++{
++ int r = 0;
++
++ mutex_lock(&kvm->arch.vpit->pit_state.lock);
++ memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
++ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
++ return r;
++}
++
++static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
++{
++ int r = 0;
++
++ mutex_lock(&kvm->arch.vpit->pit_state.lock);
++ memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
++ kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
++ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
++ return r;
++}
++
++static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
++{
++ int r = 0;
++
++ mutex_lock(&kvm->arch.vpit->pit_state.lock);
++ memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
++ sizeof(ps->channels));
++ ps->flags = kvm->arch.vpit->pit_state.flags;
++ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
++ memset(&ps->reserved, 0, sizeof(ps->reserved));
++ return r;
++}
++
++static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
++{
++ int r = 0, start = 0;
++ u32 prev_legacy, cur_legacy;
++ mutex_lock(&kvm->arch.vpit->pit_state.lock);
++ prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
++ cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
++ if (!prev_legacy && cur_legacy)
++ start = 1;
++ memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
++ sizeof(kvm->arch.vpit->pit_state.channels));
++ kvm->arch.vpit->pit_state.flags = ps->flags;
++ kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
++ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
++ return r;
++}
++
++static int kvm_vm_ioctl_reinject(struct kvm *kvm,
++ struct kvm_reinject_control *control)
++{
++ if (!kvm->arch.vpit)
++ return -ENXIO;
++ mutex_lock(&kvm->arch.vpit->pit_state.lock);
++ kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
++ mutex_unlock(&kvm->arch.vpit->pit_state.lock);
++ return 0;
++}
++
++/**
++ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
++ * @kvm: kvm instance
++ * @log: slot id and address to which we copy the log
++ *
++ * We need to keep in mind that VCPU threads can write to the bitmap
++ * concurrently. So, to avoid losing data, we keep the following order
++ * for each bit:
++ *
++ * 1. Take a snapshot of the bit and clear it if needed.
++ * 2. Write protect the corresponding page.
++ * 3. Flush TLBs if needed.
++ * 4. Copy the snapshot to userspace.
++ *
++ * Between 2 and 3, the guest may write to the page using the remaining TLB
++ * entry. This is not a problem because the page will be reported dirty at
++ * step 4 using the snapshot taken before and step 3 ensures that successive
++ * writes will be logged for the next call.
++ */
++int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
++{
++ int r;
++ struct kvm_memory_slot *memslot;
++ unsigned long n, i;
++ unsigned long *dirty_bitmap;
++ unsigned long *dirty_bitmap_buffer;
++ bool is_dirty = false;
++
++ mutex_lock(&kvm->slots_lock);
++
++ r = -EINVAL;
++ if (log->slot >= KVM_USER_MEM_SLOTS)
++ goto out;
++
++ memslot = id_to_memslot(kvm->memslots, log->slot);
++
++ dirty_bitmap = memslot->dirty_bitmap;
++ r = -ENOENT;
++ if (!dirty_bitmap)
++ goto out;
++
++ n = kvm_dirty_bitmap_bytes(memslot);
++
++ dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
++ memset(dirty_bitmap_buffer, 0, n);
++
++ spin_lock(&kvm->mmu_lock);
++
++ for (i = 0; i < n / sizeof(long); i++) {
++ unsigned long mask;
++ gfn_t offset;
++
++ if (!dirty_bitmap[i])
++ continue;
++
++ is_dirty = true;
++
++ mask = xchg(&dirty_bitmap[i], 0);
++ dirty_bitmap_buffer[i] = mask;
++
++ offset = i * BITS_PER_LONG;
++ kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
++ }
++
++ spin_unlock(&kvm->mmu_lock);
++
++ /* See the comments in kvm_mmu_slot_remove_write_access(). */
++ lockdep_assert_held(&kvm->slots_lock);
++
++ /*
++ * All the TLBs can be flushed out of mmu lock, see the comments in
++ * kvm_mmu_slot_remove_write_access().
++ */
++ if (is_dirty)
++ kvm_flush_remote_tlbs(kvm);
++
++ r = -EFAULT;
++ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
++ goto out;
++
++ r = 0;
++out:
++ mutex_unlock(&kvm->slots_lock);
++ return r;
++}
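++
++/*
++ * Editor's sketch, not part of this patch: consuming the ioctl above
++ * from userspace. The bitmap holds one bit per page of the memslot,
++ * rounded up to long granularity to match kvm_dirty_bitmap_bytes();
++ * the vm_fd/slot values are the caller's. Compiled out via #if 0.
++ */
++#if 0
++#include <stdlib.h>
++#include <sys/ioctl.h>
++#include <linux/kvm.h>
++
++static void *fetch_dirty_bitmap(int vm_fd, int slot, size_t slot_pages)
++{
++	size_t bytes = ((slot_pages + 63) / 64) * 8;
++	struct kvm_dirty_log log = { .slot = slot };
++
++	log.dirty_bitmap = calloc(1, bytes);
++	if (!log.dirty_bitmap)
++		return NULL;
++	/* snapshot-and-clear: pages dirtied afterwards show up next call */
++	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
++		free(log.dirty_bitmap);
++		return NULL;
++	}
++	return log.dirty_bitmap;
++}
++#endif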
++
++int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
++ bool line_status)
++{
++ if (!irqchip_in_kernel(kvm))
++ return -ENXIO;
++
++ irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
++ irq_event->irq, irq_event->level,
++ line_status);
++ return 0;
++}
++
++long kvm_arch_vm_ioctl(struct file *filp,
++ unsigned int ioctl, unsigned long arg)
++{
++ struct kvm *kvm = filp->private_data;
++ void __user *argp = (void __user *)arg;
++ int r = -ENOTTY;
++ /*
++ * This union makes it completely explicit to gcc-3.x
++ * that these variables' stack usage should be
++ * combined, not added together.
++ */
++ union {
++ struct kvm_pit_state ps;
++ struct kvm_pit_state2 ps2;
++ struct kvm_pit_config pit_config;
++ } u;
++
++ switch (ioctl) {
++ case KVM_SET_TSS_ADDR:
++ r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
++ break;
++ case KVM_SET_IDENTITY_MAP_ADDR: {
++ u64 ident_addr;
++
++ r = -EFAULT;
++ if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
++ goto out;
++ r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
++ break;
++ }
++ case KVM_SET_NR_MMU_PAGES:
++ r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
++ break;
++ case KVM_GET_NR_MMU_PAGES:
++ r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
++ break;
++ case KVM_CREATE_IRQCHIP: {
++ struct kvm_pic *vpic;
++
++ mutex_lock(&kvm->lock);
++ r = -EEXIST;
++ if (kvm->arch.vpic)
++ goto create_irqchip_unlock;
++ r = -EINVAL;
++ if (atomic_read(&kvm->online_vcpus))
++ goto create_irqchip_unlock;
++ r = -ENOMEM;
++ vpic = kvm_create_pic(kvm);
++ if (vpic) {
++ r = kvm_ioapic_init(kvm);
++ if (r) {
++ mutex_lock(&kvm->slots_lock);
++ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
++ &vpic->dev_master);
++ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
++ &vpic->dev_slave);
++ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
++ &vpic->dev_eclr);
++ mutex_unlock(&kvm->slots_lock);
++ kfree(vpic);
++ goto create_irqchip_unlock;
++ }
++ } else
++ goto create_irqchip_unlock;
++ smp_wmb();
++ kvm->arch.vpic = vpic;
++ smp_wmb();
++ r = kvm_setup_default_irq_routing(kvm);
++ if (r) {
++ mutex_lock(&kvm->slots_lock);
++ mutex_lock(&kvm->irq_lock);
++ kvm_ioapic_destroy(kvm);
++ kvm_destroy_pic(kvm);
++ mutex_unlock(&kvm->irq_lock);
++ mutex_unlock(&kvm->slots_lock);
++ }
++ create_irqchip_unlock:
++ mutex_unlock(&kvm->lock);
++ break;
++ }
++ case KVM_CREATE_PIT:
++ u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
++ goto create_pit;
++ case KVM_CREATE_PIT2:
++ r = -EFAULT;
++ if (copy_from_user(&u.pit_config, argp,
++ sizeof(struct kvm_pit_config)))
++ goto out;
++ create_pit:
++ mutex_lock(&kvm->slots_lock);
++ r = -EEXIST;
++ if (kvm->arch.vpit)
++ goto create_pit_unlock;
++ r = -ENOMEM;
++ kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
++ if (kvm->arch.vpit)
++ r = 0;
++ create_pit_unlock:
++ mutex_unlock(&kvm->slots_lock);
++ break;
++ case KVM_GET_IRQCHIP: {
++ /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
++ struct kvm_irqchip *chip;
++
++ chip = memdup_user(argp, sizeof(*chip));
++ if (IS_ERR(chip)) {
++ r = PTR_ERR(chip);
++ goto out;
++ }
++
++ r = -ENXIO;
++ if (!irqchip_in_kernel(kvm))
++ goto get_irqchip_out;
++ r = kvm_vm_ioctl_get_irqchip(kvm, chip);
++ if (r)
++ goto get_irqchip_out;
++ r = -EFAULT;
++ if (copy_to_user(argp, chip, sizeof *chip))
++ goto get_irqchip_out;
++ r = 0;
++ get_irqchip_out:
++ kfree(chip);
++ break;
++ }
++ case KVM_SET_IRQCHIP: {
++ /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
++ struct kvm_irqchip *chip;
++
++ chip = memdup_user(argp, sizeof(*chip));
++ if (IS_ERR(chip)) {
++ r = PTR_ERR(chip);
++ goto out;
++ }
++
++ r = -ENXIO;
++ if (!irqchip_in_kernel(kvm))
++ goto set_irqchip_out;
++ r = kvm_vm_ioctl_set_irqchip(kvm, chip);
++ if (r)
++ goto set_irqchip_out;
++ r = 0;
++ set_irqchip_out:
++ kfree(chip);
++ break;
++ }
++ case KVM_GET_PIT: {
++ r = -EFAULT;
++ if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
++ goto out;
++ r = -ENXIO;
++ if (!kvm->arch.vpit)
++ goto out;
++ r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
++ if (r)
++ goto out;
++ r = -EFAULT;
++ if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_SET_PIT: {
++ r = -EFAULT;
++ if (copy_from_user(&u.ps, argp, sizeof u.ps))
++ goto out;
++ r = -ENXIO;
++ if (!kvm->arch.vpit)
++ goto out;
++ r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
++ break;
++ }
++ case KVM_GET_PIT2: {
++ r = -ENXIO;
++ if (!kvm->arch.vpit)
++ goto out;
++ r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
++ if (r)
++ goto out;
++ r = -EFAULT;
++ if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_SET_PIT2: {
++ r = -EFAULT;
++ if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
++ goto out;
++ r = -ENXIO;
++ if (!kvm->arch.vpit)
++ goto out;
++ r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
++ break;
++ }
++ case KVM_REINJECT_CONTROL: {
++ struct kvm_reinject_control control;
++ r = -EFAULT;
++ if (copy_from_user(&control, argp, sizeof(control)))
++ goto out;
++ r = kvm_vm_ioctl_reinject(kvm, &control);
++ break;
++ }
++ case KVM_XEN_HVM_CONFIG: {
++ r = -EFAULT;
++ if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
++ sizeof(struct kvm_xen_hvm_config)))
++ goto out;
++ r = -EINVAL;
++ if (kvm->arch.xen_hvm_config.flags)
++ goto out;
++ r = 0;
++ break;
++ }
++ case KVM_SET_CLOCK: {
++ struct kvm_clock_data user_ns;
++ u64 now_ns;
++ s64 delta;
++
++ r = -EFAULT;
++ if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
++ goto out;
++
++ r = -EINVAL;
++ if (user_ns.flags)
++ goto out;
++
++ r = 0;
++ local_irq_disable();
++ now_ns = get_kernel_ns();
++ delta = user_ns.clock - now_ns;
++ local_irq_enable();
++ kvm->arch.kvmclock_offset = delta;
++ kvm_gen_update_masterclock(kvm);
++ break;
++ }
++ case KVM_GET_CLOCK: {
++ struct kvm_clock_data user_ns;
++ u64 now_ns;
++
++ local_irq_disable();
++ now_ns = get_kernel_ns();
++ user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
++ local_irq_enable();
++ user_ns.flags = 0;
++ memset(&user_ns.pad, 0, sizeof(user_ns.pad));
++
++ r = -EFAULT;
++ if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
++ goto out;
++ r = 0;
++ break;
++ }
++
++ default:
++ ;
++ }
++out:
++ return r;
++}
++
++static void kvm_init_msr_list(void)
++{
++ u32 dummy[2];
++ unsigned i, j;
++
++ /* Skip the first MSRs in the list; they are KVM-specific. */
++ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
++ if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
++ continue;
++
++ /*
++ * Even MSRs that are valid in the host may not be exposed
++ * to the guests in some cases. We could work around this
++ * in VMX with the generic MSR save/load machinery, but it
++ * is not really worthwhile since it will only
++ * happen with nested virtualization.
++ */
++ switch (msrs_to_save[i]) {
++ case MSR_IA32_BNDCFGS:
++ if (!kvm_x86_ops->mpx_supported())
++ continue;
++ break;
++ default:
++ break;
++ }
++
++ if (j < i)
++ msrs_to_save[j] = msrs_to_save[i];
++ j++;
++ }
++ num_msrs_to_save = j;
++}
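
kvm_init_msr_list() uses the classic two-index in-place filter: i scans the array, j tracks the write position, and entries are compacted without extra storage. The same idiom in isolation, as a standalone sketch:

    /* Keep only elements that pass keep(); returns the new length. */
    static unsigned filter_in_place(u32 *a, unsigned n, bool (*keep)(u32))
    {
            unsigned i, j;

            for (i = j = 0; i < n; i++) {
                    if (!keep(a[i]))
                            continue;
                    if (j < i)
                            a[j] = a[i];
                    j++;
            }
            return j;
    }
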
++
++static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
++ const void *v)
++{
++ int handled = 0;
++ int n;
++
++ do {
++ n = min(len, 8);
++ if (!(vcpu->arch.apic &&
++ !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
++ && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
++ break;
++ handled += n;
++ addr += n;
++ len -= n;
++ v += n;
++ } while (len);
++
++ return handled;
++}
++
++static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
++{
++ int handled = 0;
++ int n;
++
++ do {
++ n = min(len, 8);
++ if (!(vcpu->arch.apic &&
++ !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
++ && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
++ break;
++ trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
++ handled += n;
++ addr += n;
++ len -= n;
++ v += n;
++ } while (len);
++
++ return handled;
++}
++
++static void kvm_set_segment(struct kvm_vcpu *vcpu,
++ struct kvm_segment *var, int seg)
++{
++ kvm_x86_ops->set_segment(vcpu, var, seg);
++}
++
++void kvm_get_segment(struct kvm_vcpu *vcpu,
++ struct kvm_segment *var, int seg)
++{
++ kvm_x86_ops->get_segment(vcpu, var, seg);
++}
++
++gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
++ struct x86_exception *exception)
++{
++ gpa_t t_gpa;
++
++ BUG_ON(!mmu_is_nested(vcpu));
++
++ /* NPT walks are always user-walks */
++ access |= PFERR_USER_MASK;
++ t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
++
++ return t_gpa;
++}
++
++gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
++ struct x86_exception *exception)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
++}
++
++gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
++ struct x86_exception *exception)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ access |= PFERR_FETCH_MASK;
++ return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
++}
++
++gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
++ struct x86_exception *exception)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ access |= PFERR_WRITE_MASK;
++ return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
++}
++
++/* used to access any guest's mapped memory without checking CPL */
++gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
++ struct x86_exception *exception)
++{
++ return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
++}
++
++static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 access,
++ struct x86_exception *exception)
++{
++ void *data = val;
++ int r = X86EMUL_CONTINUE;
++
++ while (bytes) {
++ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
++ exception);
++ unsigned offset = addr & (PAGE_SIZE-1);
++ unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
++ int ret;
++
++ if (gpa == UNMAPPED_GVA)
++ return X86EMUL_PROPAGATE_FAULT;
++ ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
++ offset, toread);
++ if (ret < 0) {
++ r = X86EMUL_IO_NEEDED;
++ goto out;
++ }
++
++ bytes -= toread;
++ data += toread;
++ addr += toread;
++ }
++out:
++ return r;
++}
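
The loop above splits the copy at page granularity because contiguous guest-virtual pages need not be contiguous in guest-physical memory; each iteration re-translates. Worked numbers for the chunk arithmetic (4 KiB pages assumed):

    /* addr = 0x1ff8, bytes = 16, PAGE_SIZE = 0x1000 */
    unsigned offset = addr & (PAGE_SIZE - 1);                   /* 0xff8 */
    unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); /* 8     */
    /* the first pass reads 8 bytes up to the page end; the second
     * pass starts at 0x2000 with offset 0 and reads the other 8 */
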
++
++/* used for instruction fetching */
++static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
++ gva_t addr, void *val, unsigned int bytes,
++ struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ unsigned offset;
++ int ret;
++
++ /* Inline kvm_read_guest_virt_helper for speed. */
++ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
++ exception);
++ if (unlikely(gpa == UNMAPPED_GVA))
++ return X86EMUL_PROPAGATE_FAULT;
++
++ offset = addr & (PAGE_SIZE-1);
++ if (WARN_ON(offset + bytes > PAGE_SIZE))
++ bytes = (unsigned)PAGE_SIZE - offset;
++ ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
++ offset, bytes);
++ if (unlikely(ret < 0))
++ return X86EMUL_IO_NEEDED;
++
++ return X86EMUL_CONTINUE;
++}
++
++int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++ gva_t addr, void *val, unsigned int bytes,
++ struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++
++ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
++ exception);
++}
++EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
++
++static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++ gva_t addr, void *val, unsigned int bytes,
++ struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
++}
++
++int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++ gva_t addr, void *val,
++ unsigned int bytes,
++ struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ void *data = val;
++ int r = X86EMUL_CONTINUE;
++
++ while (bytes) {
++ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
++ PFERR_WRITE_MASK,
++ exception);
++ unsigned offset = addr & (PAGE_SIZE-1);
++ unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
++ int ret;
++
++ if (gpa == UNMAPPED_GVA)
++ return X86EMUL_PROPAGATE_FAULT;
++ ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
++ if (ret < 0) {
++ r = X86EMUL_IO_NEEDED;
++ goto out;
++ }
++
++ bytes -= towrite;
++ data += towrite;
++ addr += towrite;
++ }
++out:
++ return r;
++}
++EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
++
++static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
++ gpa_t *gpa, struct x86_exception *exception,
++ bool write)
++{
++ u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
++ | (write ? PFERR_WRITE_MASK : 0);
++
++ if (vcpu_match_mmio_gva(vcpu, gva)
++ && !permission_fault(vcpu, vcpu->arch.walk_mmu,
++ vcpu->arch.access, access)) {
++ *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
++ (gva & (PAGE_SIZE - 1));
++ trace_vcpu_match_mmio(gva, *gpa, write, false);
++ return 1;
++ }
++
++ *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
++
++ if (*gpa == UNMAPPED_GVA)
++ return -1;
++
++ /* For APIC access vmexit */
++ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
++ return 1;
++
++ if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
++ trace_vcpu_match_mmio(gva, *gpa, write, true);
++ return 1;
++ }
++
++ return 0;
++}
++
++int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
++ const void *val, int bytes)
++{
++ int ret;
++
++ ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
++ if (ret < 0)
++ return 0;
++ kvm_mmu_pte_write(vcpu, gpa, val, bytes);
++ return 1;
++}
++
++struct read_write_emulator_ops {
++ int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
++ int bytes);
++ int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
++ void *val, int bytes);
++ int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
++ int bytes, void *val);
++ int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
++ void *val, int bytes);
++ bool write;
++};
++
++static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
++{
++ if (vcpu->mmio_read_completed) {
++ trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
++ vcpu->mmio_fragments[0].gpa, *(u64 *)val);
++ vcpu->mmio_read_completed = 0;
++ return 1;
++ }
++
++ return 0;
++}
++
++static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
++ void *val, int bytes)
++{
++ return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
++}
++
++static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
++ void *val, int bytes)
++{
++ return emulator_write_phys(vcpu, gpa, val, bytes);
++}
++
++static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
++{
++ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
++ return vcpu_mmio_write(vcpu, gpa, bytes, val);
++}
++
++static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
++ void *val, int bytes)
++{
++ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
++ return X86EMUL_IO_NEEDED;
++}
++
++static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
++ void *val, int bytes)
++{
++ struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
++
++ memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
++ return X86EMUL_CONTINUE;
++}
++
++static const struct read_write_emulator_ops read_emultor = {
++ .read_write_prepare = read_prepare,
++ .read_write_emulate = read_emulate,
++ .read_write_mmio = vcpu_mmio_read,
++ .read_write_exit_mmio = read_exit_mmio,
++};
++
++static const struct read_write_emulator_ops write_emultor = {
++ .read_write_emulate = write_emulate,
++ .read_write_mmio = write_mmio,
++ .read_write_exit_mmio = write_exit_mmio,
++ .write = true,
++};
++
++static int emulator_read_write_onepage(unsigned long addr, void *val,
++ unsigned int bytes,
++ struct x86_exception *exception,
++ struct kvm_vcpu *vcpu,
++ const struct read_write_emulator_ops *ops)
++{
++ gpa_t gpa;
++ int handled, ret;
++ bool write = ops->write;
++ struct kvm_mmio_fragment *frag;
++
++ ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
++
++ if (ret < 0)
++ return X86EMUL_PROPAGATE_FAULT;
++
++ /* For APIC access vmexit */
++ if (ret)
++ goto mmio;
++
++ if (ops->read_write_emulate(vcpu, gpa, val, bytes))
++ return X86EMUL_CONTINUE;
++
++mmio:
++ /*
++ * Is this MMIO handled locally?
++ */
++ handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
++ if (handled == bytes)
++ return X86EMUL_CONTINUE;
++
++ gpa += handled;
++ bytes -= handled;
++ val += handled;
++
++ WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
++ frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
++ frag->gpa = gpa;
++ frag->data = val;
++ frag->len = bytes;
++ return X86EMUL_CONTINUE;
++}
++
++int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
++ void *val, unsigned int bytes,
++ struct x86_exception *exception,
++ const struct read_write_emulator_ops *ops)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ gpa_t gpa;
++ int rc;
++
++ if (ops->read_write_prepare &&
++ ops->read_write_prepare(vcpu, val, bytes))
++ return X86EMUL_CONTINUE;
++
++ vcpu->mmio_nr_fragments = 0;
++
++ /* Crossing a page boundary? */
++ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
++ int now;
++
++ now = -addr & ~PAGE_MASK;
++ rc = emulator_read_write_onepage(addr, val, now, exception,
++ vcpu, ops);
++
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++ addr += now;
++ val += now;
++ bytes -= now;
++ }
++
++ rc = emulator_read_write_onepage(addr, val, bytes, exception,
++ vcpu, ops);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
++ if (!vcpu->mmio_nr_fragments)
++ return rc;
++
++ gpa = vcpu->mmio_fragments[0].gpa;
++
++ vcpu->mmio_needed = 1;
++ vcpu->mmio_cur_fragment = 0;
++
++ vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
++ vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
++ vcpu->run->exit_reason = KVM_EXIT_MMIO;
++ vcpu->run->mmio.phys_addr = gpa;
++
++ return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
++}
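
In the page-crossing branch, `now = -addr & ~PAGE_MASK` computes the bytes left in addr's page: ~PAGE_MASK is PAGE_SIZE - 1, and in two's complement -addr modulo the page size is the distance to the next boundary. A standalone check of that identity (a sketch, not from the patch):

    #include <assert.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long addr = 0x1ffe;

            /* 0x1ffe is 2 bytes short of the 0x2000 boundary */
            assert((-addr & ~PAGE_MASK) == 2);
            /* a page-aligned address yields 0 (no split needed) */
            assert((-0x3000UL & ~PAGE_MASK) == 0);
            return 0;
    }
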
++
++static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
++ unsigned long addr,
++ void *val,
++ unsigned int bytes,
++ struct x86_exception *exception)
++{
++ return emulator_read_write(ctxt, addr, val, bytes,
++ exception, &read_emultor);
++}
++
++int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
++ unsigned long addr,
++ const void *val,
++ unsigned int bytes,
++ struct x86_exception *exception)
++{
++ return emulator_read_write(ctxt, addr, (void *)val, bytes,
++ exception, &write_emultor);
++}
++
++#define CMPXCHG_TYPE(t, ptr, old, new) \
++ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
++
++#ifdef CONFIG_X86_64
++# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
++#else
++# define CMPXCHG64(ptr, old, new) \
++ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
++#endif
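
Both macros evaluate to true when the compare-and-swap won, i.e. the old value still matched when the store was attempted. Hypothetical usage (kernel context assumed for cmpxchg()):

    u32 slot = 5, expected = 5, desired = 9;
    bool won = CMPXCHG_TYPE(u32, &slot, &expected, &desired);
    /* won == true and slot == 9 here; had slot changed under us,
     * won would be false and slot would keep the other writer's value */
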
++
++static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
++ unsigned long addr,
++ const void *old,
++ const void *new,
++ unsigned int bytes,
++ struct x86_exception *exception)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ gpa_t gpa;
++ struct page *page;
++ char *kaddr;
++ bool exchanged;
++
++ /* a guest's cmpxchg8b has to be emulated atomically */
++ if (bytes > 8 || (bytes & (bytes - 1)))
++ goto emul_write;
++
++ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
++
++ if (gpa == UNMAPPED_GVA ||
++ (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
++ goto emul_write;
++
++ if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
++ goto emul_write;
++
++ page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
++ if (is_error_page(page))
++ goto emul_write;
++
++ kaddr = kmap_atomic(page);
++ kaddr += offset_in_page(gpa);
++ switch (bytes) {
++ case 1:
++ exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
++ break;
++ case 2:
++ exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
++ break;
++ case 4:
++ exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
++ break;
++ case 8:
++ exchanged = CMPXCHG64(kaddr, old, new);
++ break;
++ default:
++ BUG();
++ }
++ kunmap_atomic(kaddr);
++ kvm_release_page_dirty(page);
++
++ if (!exchanged)
++ return X86EMUL_CMPXCHG_FAILED;
++
++ mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
++ kvm_mmu_pte_write(vcpu, gpa, new, bytes);
++
++ return X86EMUL_CONTINUE;
++
++emul_write:
++ printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
++
++ return emulator_write_emulated(ctxt, addr, new, bytes, exception);
++}
++
++static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
++{
++ /* TODO: String I/O for in-kernel device */
++ int r;
++
++ if (vcpu->arch.pio.in)
++ r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
++ vcpu->arch.pio.size, pd);
++ else
++ r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
++ vcpu->arch.pio.port, vcpu->arch.pio.size,
++ pd);
++ return r;
++}
++
++static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
++ unsigned short port, void *val,
++ unsigned int count, bool in)
++{
++ vcpu->arch.pio.port = port;
++ vcpu->arch.pio.in = in;
++ vcpu->arch.pio.count = count;
++ vcpu->arch.pio.size = size;
++
++ if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
++ vcpu->arch.pio.count = 0;
++ return 1;
++ }
++
++ vcpu->run->exit_reason = KVM_EXIT_IO;
++ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
++ vcpu->run->io.size = size;
++ vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
++ vcpu->run->io.count = count;
++ vcpu->run->io.port = port;
++
++ return 0;
++}
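
When kernel_pio() cannot satisfy the access, the function stages a KVM_EXIT_IO exit; the data lives in the shared kvm_run page at io.data_offset. A hedged sketch of the matching userspace handler (run is the mmap'ed kvm_run; handle_port_write() is hypothetical):

    if (run->exit_reason == KVM_EXIT_IO &&
        run->io.direction == KVM_EXIT_IO_OUT) {
            const __u8 *data = (const __u8 *)run + run->io.data_offset;

            handle_port_write(run->io.port, data,
                              (size_t)run->io.size * run->io.count);
    }
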
++
++static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
++ int size, unsigned short port, void *val,
++ unsigned int count)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ int ret;
++
++ if (vcpu->arch.pio.count)
++ goto data_avail;
++
++ ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
++ if (ret) {
++data_avail:
++ memcpy(val, vcpu->arch.pio_data, size * count);
++ trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
++ vcpu->arch.pio.count = 0;
++ return 1;
++ }
++
++ return 0;
++}
++
++static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
++ int size, unsigned short port,
++ const void *val, unsigned int count)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++
++ memcpy(vcpu->arch.pio_data, val, size * count);
++ trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
++ return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
++}
++
++static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
++{
++ return kvm_x86_ops->get_segment_base(vcpu, seg);
++}
++
++static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
++{
++ kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
++}
++
++int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
++{
++ if (!need_emulate_wbinvd(vcpu))
++ return X86EMUL_CONTINUE;
++
++ if (kvm_x86_ops->has_wbinvd_exit()) {
++ int cpu = get_cpu();
++
++ cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
++ smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
++ wbinvd_ipi, NULL, 1);
++ put_cpu();
++ cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
++ } else
++ wbinvd();
++ return X86EMUL_CONTINUE;
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
++
++static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
++{
++ kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
++}
++
++int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
++{
++ return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
++}
++
++int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
++{
++ return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
++}
++
++static u64 mk_cr_64(u64 curr_cr, u32 new_val)
++{
++ return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
++}
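
mk_cr_64() splices a 32-bit update into the low half of a control register while preserving the upper half, e.g.:

    u64 cr0 = mk_cr_64(0xffffffff80050033ULL, 0x80000011);
    /* cr0 == 0xffffffff80000011: upper half kept, lower half replaced */
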
++
++static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ unsigned long value;
++
++ switch (cr) {
++ case 0:
++ value = kvm_read_cr0(vcpu);
++ break;
++ case 2:
++ value = vcpu->arch.cr2;
++ break;
++ case 3:
++ value = kvm_read_cr3(vcpu);
++ break;
++ case 4:
++ value = kvm_read_cr4(vcpu);
++ break;
++ case 8:
++ value = kvm_get_cr8(vcpu);
++ break;
++ default:
++ kvm_err("%s: unexpected cr %u\n", __func__, cr);
++ return 0;
++ }
++
++ return value;
++}
++
++static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ int res = 0;
++
++ switch (cr) {
++ case 0:
++ res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
++ break;
++ case 2:
++ vcpu->arch.cr2 = val;
++ break;
++ case 3:
++ res = kvm_set_cr3(vcpu, val);
++ break;
++ case 4:
++ res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
++ break;
++ case 8:
++ res = kvm_set_cr8(vcpu, val);
++ break;
++ default:
++ kvm_err("%s: unexpected cr %u\n", __func__, cr);
++ res = -1;
++ }
++
++ return res;
++}
++
++static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
++{
++ return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
++}
++
++static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
++{
++ kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
++}
++
++static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
++{
++ kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
++}
++
++static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
++{
++ kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
++}
++
++static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
++{
++ kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
++}
++
++static unsigned long emulator_get_cached_segment_base(
++ struct x86_emulate_ctxt *ctxt, int seg)
++{
++ return get_segment_base(emul_to_vcpu(ctxt), seg);
++}
++
++static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
++ struct desc_struct *desc, u32 *base3,
++ int seg)
++{
++ struct kvm_segment var;
++
++ kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
++ *selector = var.selector;
++
++ if (var.unusable) {
++ memset(desc, 0, sizeof(*desc));
++ return false;
++ }
++
++ if (var.g)
++ var.limit >>= 12;
++ set_desc_limit(desc, var.limit);
++ set_desc_base(desc, (unsigned long)var.base);
++#ifdef CONFIG_X86_64
++ if (base3)
++ *base3 = var.base >> 32;
++#endif
++ desc->type = var.type;
++ desc->s = var.s;
++ desc->dpl = var.dpl;
++ desc->p = var.present;
++ desc->avl = var.avl;
++ desc->l = var.l;
++ desc->d = var.db;
++ desc->g = var.g;
++
++ return true;
++}
++
++static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
++ struct desc_struct *desc, u32 base3,
++ int seg)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ struct kvm_segment var;
++
++ var.selector = selector;
++ var.base = get_desc_base(desc);
++#ifdef CONFIG_X86_64
++ var.base |= ((u64)base3) << 32;
++#endif
++ var.limit = get_desc_limit(desc);
++ if (desc->g)
++ var.limit = (var.limit << 12) | 0xfff;
++ var.type = desc->type;
++ var.dpl = desc->dpl;
++ var.db = desc->d;
++ var.s = desc->s;
++ var.l = desc->l;
++ var.g = desc->g;
++ var.avl = desc->avl;
++ var.present = desc->p;
++ var.unusable = !var.present;
++ var.padding = 0;
++
++ kvm_set_segment(vcpu, &var, seg);
++}
++
++static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
++ u32 msr_index, u64 *pdata)
++{
++ return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
++}
++
++static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
++ u32 msr_index, u64 data)
++{
++ struct msr_data msr;
++
++ msr.data = data;
++ msr.index = msr_index;
++ msr.host_initiated = false;
++ return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
++}
++
++static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
++ u32 pmc)
++{
++ return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
++}
++
++static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
++ u32 pmc, u64 *pdata)
++{
++ return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
++}
++
++static void emulator_halt(struct x86_emulate_ctxt *ctxt)
++{
++ emul_to_vcpu(ctxt)->arch.halt_request = 1;
++}
++
++static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
++{
++ preempt_disable();
++ kvm_load_guest_fpu(emul_to_vcpu(ctxt));
++ /*
++ * CR0.TS may reference the host fpu state, not the guest fpu state,
++ * so it may be clear at this point.
++ */
++ clts();
++}
++
++static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
++{
++ preempt_enable();
++}
++
++static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
++ struct x86_instruction_info *info,
++ enum x86_intercept_stage stage)
++{
++ return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
++}
++
++static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
++ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
++{
++ kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
++}
++
++static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
++{
++ return kvm_register_read(emul_to_vcpu(ctxt), reg);
++}
++
++static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
++{
++ kvm_register_write(emul_to_vcpu(ctxt), reg, val);
++}
++
++static const struct x86_emulate_ops emulate_ops = {
++ .read_gpr = emulator_read_gpr,
++ .write_gpr = emulator_write_gpr,
++ .read_std = kvm_read_guest_virt_system,
++ .write_std = kvm_write_guest_virt_system,
++ .fetch = kvm_fetch_guest_virt,
++ .read_emulated = emulator_read_emulated,
++ .write_emulated = emulator_write_emulated,
++ .cmpxchg_emulated = emulator_cmpxchg_emulated,
++ .invlpg = emulator_invlpg,
++ .pio_in_emulated = emulator_pio_in_emulated,
++ .pio_out_emulated = emulator_pio_out_emulated,
++ .get_segment = emulator_get_segment,
++ .set_segment = emulator_set_segment,
++ .get_cached_segment_base = emulator_get_cached_segment_base,
++ .get_gdt = emulator_get_gdt,
++ .get_idt = emulator_get_idt,
++ .set_gdt = emulator_set_gdt,
++ .set_idt = emulator_set_idt,
++ .get_cr = emulator_get_cr,
++ .set_cr = emulator_set_cr,
++ .cpl = emulator_get_cpl,
++ .get_dr = emulator_get_dr,
++ .set_dr = emulator_set_dr,
++ .set_msr = emulator_set_msr,
++ .get_msr = emulator_get_msr,
++ .check_pmc = emulator_check_pmc,
++ .read_pmc = emulator_read_pmc,
++ .halt = emulator_halt,
++ .wbinvd = emulator_wbinvd,
++ .fix_hypercall = emulator_fix_hypercall,
++ .get_fpu = emulator_get_fpu,
++ .put_fpu = emulator_put_fpu,
++ .intercept = emulator_intercept,
++ .get_cpuid = emulator_get_cpuid,
++};
++
++static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
++{
++ u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
++ /*
++ * An "sti; sti" sequence only disables interrupts for the first
++ * instruction. So, if the last instruction, be it emulated or
++ * not, left the system with the INT_STI flag enabled, it
++ * means that the last instruction was an sti. We should not
++ * leave the flag on in this case. The same goes for mov ss.
++ */
++ if (int_shadow & mask)
++ mask = 0;
++ if (unlikely(int_shadow || mask)) {
++ kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
++ if (!mask)
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ }
++}
++
++static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
++{
++ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
++ if (ctxt->exception.vector == PF_VECTOR)
++ return kvm_propagate_fault(vcpu, &ctxt->exception);
++
++ if (ctxt->exception.error_code_valid)
++ kvm_queue_exception_e(vcpu, ctxt->exception.vector,
++ ctxt->exception.error_code);
++ else
++ kvm_queue_exception(vcpu, ctxt->exception.vector);
++ return false;
++}
++
++static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
++{
++ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
++ int cs_db, cs_l;
++
++ kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
++
++ ctxt->eflags = kvm_get_rflags(vcpu);
++ ctxt->eip = kvm_rip_read(vcpu);
++ ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
++ (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
++ (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 :
++ cs_db ? X86EMUL_MODE_PROT32 :
++ X86EMUL_MODE_PROT16;
++ ctxt->guest_mode = is_guest_mode(vcpu);
++
++ init_decode_cache(ctxt);
++ vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
++}
++
++int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
++{
++ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
++ int ret;
++
++ init_emulate_ctxt(vcpu);
++
++ ctxt->op_bytes = 2;
++ ctxt->ad_bytes = 2;
++ ctxt->_eip = ctxt->eip + inc_eip;
++ ret = emulate_int_real(ctxt, irq);
++
++ if (ret != X86EMUL_CONTINUE)
++ return EMULATE_FAIL;
++
++ ctxt->eip = ctxt->_eip;
++ kvm_rip_write(vcpu, ctxt->eip);
++ kvm_set_rflags(vcpu, ctxt->eflags);
++
++ if (irq == NMI_VECTOR)
++ vcpu->arch.nmi_pending = 0;
++ else
++ vcpu->arch.interrupt.pending = false;
++
++ return EMULATE_DONE;
++}
++EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
++
++static int handle_emulation_failure(struct kvm_vcpu *vcpu)
++{
++ int r = EMULATE_DONE;
++
++ ++vcpu->stat.insn_emulation_fail;
++ trace_kvm_emulate_insn_failed(vcpu);
++ if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
++ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
++ vcpu->run->internal.ndata = 0;
++ r = EMULATE_FAIL;
++ }
++ kvm_queue_exception(vcpu, UD_VECTOR);
++
++ return r;
++}
++
++static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
++ bool write_fault_to_shadow_pgtable,
++ int emulation_type)
++{
++ gpa_t gpa = cr2;
++ pfn_t pfn;
++
++ if (emulation_type & EMULTYPE_NO_REEXECUTE)
++ return false;
++
++ if (!vcpu->arch.mmu.direct_map) {
++ /*
++ * Write permission should be allowed since only
++ * write access needs to be emulated.
++ */
++ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
++
++ /*
++ * If the mapping is invalid in the guest, let the CPU retry
++ * it to generate the fault.
++ */
++ if (gpa == UNMAPPED_GVA)
++ return true;
++ }
++
++ /*
++ * Do not retry the unhandleable instruction if it faults on the
++ * read-only host memory; otherwise it will go into an infinite loop:
++ * retry instruction -> write #PF -> emulation fail -> retry
++ * instruction -> ...
++ */
++ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
++
++ /*
++ * If the instruction failed on the error pfn, it cannot be fixed;
++ * report the error to userspace.
++ */
++ if (is_error_noslot_pfn(pfn))
++ return false;
++
++ kvm_release_pfn_clean(pfn);
++
++ /* The instructions are well-emulated on direct mmu. */
++ if (vcpu->arch.mmu.direct_map) {
++ unsigned int indirect_shadow_pages;
++
++ spin_lock(&vcpu->kvm->mmu_lock);
++ indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
++ spin_unlock(&vcpu->kvm->mmu_lock);
++
++ if (indirect_shadow_pages)
++ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
++
++ return true;
++ }
++
++ /*
++ * If emulation was due to access to a shadowed page table
++ * and it failed, try to unshadow the page and re-enter the
++ * guest to let the CPU execute the instruction.
++ */
++ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
++
++ /*
++ * If the access faults on its page table, it cannot
++ * be fixed by unprotecting the shadow page, and it should
++ * be reported to userspace.
++ */
++ return !write_fault_to_shadow_pgtable;
++}
++
++static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
++ unsigned long cr2, int emulation_type)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
++
++ last_retry_eip = vcpu->arch.last_retry_eip;
++ last_retry_addr = vcpu->arch.last_retry_addr;
++
++ /*
++ * If the emulation was caused by a #PF on a non-page-table-writing
++ * instruction, the VM-EXIT was caused by shadow-page protection:
++ * we can zap the shadow page and retry this instruction directly.
++ *
++ * Note: if the guest uses a non-page-table-modifying instruction
++ * on the PDE that points to the instruction, then we will unmap
++ * the instruction and go into an infinite loop. So, we cache the
++ * last retried eip and the last fault address; if we meet the eip
++ * and the address again, we can break out of the potential infinite
++ * loop.
++ */
++ vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
++
++ if (!(emulation_type & EMULTYPE_RETRY))
++ return false;
++
++ if (x86_page_table_writing_insn(ctxt))
++ return false;
++
++ if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
++ return false;
++
++ vcpu->arch.last_retry_eip = ctxt->eip;
++ vcpu->arch.last_retry_addr = cr2;
++
++ if (!vcpu->arch.mmu.direct_map)
++ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
++
++ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
++
++ return true;
++}
++
++static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
++static int complete_emulated_pio(struct kvm_vcpu *vcpu);
++
++static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
++ unsigned long *db)
++{
++ u32 dr6 = 0;
++ int i;
++ u32 enable, rwlen;
++
++ enable = dr7;
++ rwlen = dr7 >> 16;
++ for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
++ if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
++ dr6 |= (1 << i);
++ return dr6;
++}
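
The loop walks DR7 as the SDM lays it out: an L/G enable pair per breakpoint in bits 0-7, and a 4-bit R/W+LEN nibble per breakpoint starting at bit 16. The equivalent per-index field extraction, as a sketch (i is the breakpoint index 0..3):

    unsigned enable = (dr7 >> (2 * i)) & 3;       /* local/global enable */
    unsigned rw     = (dr7 >> (16 + 4 * i)) & 3;  /* 0 = instruction fetch */
    unsigned len    = (dr7 >> (18 + 4 * i)) & 3;  /* 0 = 1 byte */
    /* the loop's "(rwlen & 15) == type" compares the whole nibble,
     * so type 0 selects execute breakpoints of length 1 */
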
++
++static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
++{
++ struct kvm_run *kvm_run = vcpu->run;
++
++ /*
++ * rflags is the old, "raw" value of the flags. The new value has
++ * not been saved yet.
++ *
++ * This is correct even for TF set by the guest, because "the
++ * processor will not generate this exception after the instruction
++ * that sets the TF flag".
++ */
++ if (unlikely(rflags & X86_EFLAGS_TF)) {
++ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
++ kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
++ DR6_RTM;
++ kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
++ kvm_run->debug.arch.exception = DB_VECTOR;
++ kvm_run->exit_reason = KVM_EXIT_DEBUG;
++ *r = EMULATE_USER_EXIT;
++ } else {
++ vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
++ /*
++ * "Certain debug exceptions may clear bit 0-3. The
++ * remaining contents of the DR6 register are never
++ * cleared by the processor".
++ */
++ vcpu->arch.dr6 &= ~15;
++ vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
++ kvm_queue_exception(vcpu, DB_VECTOR);
++ }
++ }
++}
++
++static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
++{
++ struct kvm_run *kvm_run = vcpu->run;
++ unsigned long eip = vcpu->arch.emulate_ctxt.eip;
++ u32 dr6 = 0;
++
++ if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
++ (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
++ dr6 = kvm_vcpu_check_hw_bp(eip, 0,
++ vcpu->arch.guest_debug_dr7,
++ vcpu->arch.eff_db);
++
++ if (dr6 != 0) {
++ kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
++ kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
++ get_segment_base(vcpu, VCPU_SREG_CS);
++
++ kvm_run->debug.arch.exception = DB_VECTOR;
++ kvm_run->exit_reason = KVM_EXIT_DEBUG;
++ *r = EMULATE_USER_EXIT;
++ return true;
++ }
++ }
++
++ if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
++ !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
++ dr6 = kvm_vcpu_check_hw_bp(eip, 0,
++ vcpu->arch.dr7,
++ vcpu->arch.db);
++
++ if (dr6 != 0) {
++ vcpu->arch.dr6 &= ~15;
++ vcpu->arch.dr6 |= dr6 | DR6_RTM;
++ kvm_queue_exception(vcpu, DB_VECTOR);
++ *r = EMULATE_DONE;
++ return true;
++ }
++ }
++
++ return false;
++}
++
++int x86_emulate_instruction(struct kvm_vcpu *vcpu,
++ unsigned long cr2,
++ int emulation_type,
++ void *insn,
++ int insn_len)
++{
++ int r;
++ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
++ bool writeback = true;
++ bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
++
++ /*
++ * Clear write_fault_to_shadow_pgtable here to ensure it is
++ * never reused.
++ */
++ vcpu->arch.write_fault_to_shadow_pgtable = false;
++ kvm_clear_exception_queue(vcpu);
++
++ if (!(emulation_type & EMULTYPE_NO_DECODE)) {
++ init_emulate_ctxt(vcpu);
++
++ /*
++ * We will reenter on the same instruction since
++ * we do not set complete_userspace_io. This does not
++ * handle watchpoints yet; those would be handled in
++ * the emulate_ops.
++ */
++ if (kvm_vcpu_check_breakpoint(vcpu, &r))
++ return r;
++
++ ctxt->interruptibility = 0;
++ ctxt->have_exception = false;
++ ctxt->exception.vector = -1;
++ ctxt->perm_ok = false;
++
++ ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
++
++ r = x86_decode_insn(ctxt, insn, insn_len);
++
++ trace_kvm_emulate_insn_start(vcpu);
++ ++vcpu->stat.insn_emulation;
++ if (r != EMULATION_OK) {
++ if (emulation_type & EMULTYPE_TRAP_UD)
++ return EMULATE_FAIL;
++ if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
++ emulation_type))
++ return EMULATE_DONE;
++ if (emulation_type & EMULTYPE_SKIP)
++ return EMULATE_FAIL;
++ return handle_emulation_failure(vcpu);
++ }
++ }
++
++ if (emulation_type & EMULTYPE_SKIP) {
++ kvm_rip_write(vcpu, ctxt->_eip);
++ if (ctxt->eflags & X86_EFLAGS_RF)
++ kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
++ return EMULATE_DONE;
++ }
++
++ if (retry_instruction(ctxt, cr2, emulation_type))
++ return EMULATE_DONE;
++
++ /* this is needed for the vmware backdoor interface to work since it
++ changes register values during the IO operation */
++ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
++ vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
++ emulator_invalidate_register_cache(ctxt);
++ }
++
++restart:
++ r = x86_emulate_insn(ctxt);
++
++ if (r == EMULATION_INTERCEPTED)
++ return EMULATE_DONE;
++
++ if (r == EMULATION_FAILED) {
++ if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
++ emulation_type))
++ return EMULATE_DONE;
++
++ return handle_emulation_failure(vcpu);
++ }
++
++ if (ctxt->have_exception) {
++ r = EMULATE_DONE;
++ if (inject_emulated_exception(vcpu))
++ return r;
++ } else if (vcpu->arch.pio.count) {
++ if (!vcpu->arch.pio.in) {
++ /* FIXME: return into emulator if single-stepping. */
++ vcpu->arch.pio.count = 0;
++ } else {
++ writeback = false;
++ vcpu->arch.complete_userspace_io = complete_emulated_pio;
++ }
++ r = EMULATE_USER_EXIT;
++ } else if (vcpu->mmio_needed) {
++ if (!vcpu->mmio_is_write)
++ writeback = false;
++ r = EMULATE_USER_EXIT;
++ vcpu->arch.complete_userspace_io = complete_emulated_mmio;
++ } else if (r == EMULATION_RESTART)
++ goto restart;
++ else
++ r = EMULATE_DONE;
++
++ if (writeback) {
++ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
++ toggle_interruptibility(vcpu, ctxt->interruptibility);
++ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
++ kvm_rip_write(vcpu, ctxt->eip);
++ if (r == EMULATE_DONE)
++ kvm_vcpu_check_singlestep(vcpu, rflags, &r);
++ __kvm_set_rflags(vcpu, ctxt->eflags);
++
++ /*
++ * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
++ * do nothing, and it will be requested again as soon as
++ * the shadow expires. But we still need to check here,
++ * because POPF has no interrupt shadow.
++ */
++ if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ } else
++ vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
++
++ return r;
++}
++EXPORT_SYMBOL_GPL(x86_emulate_instruction);
++
++int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
++{
++ unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
++ int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
++ size, port, &val, 1);
++ /* do not return to emulator after return from userspace */
++ vcpu->arch.pio.count = 0;
++ return ret;
++}
++EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
++
++static void tsc_bad(void *info)
++{
++ __this_cpu_write(cpu_tsc_khz, 0);
++}
++
++static void tsc_khz_changed(void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ unsigned long khz = 0;
++
++ if (data)
++ khz = freq->new;
++ else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
++ khz = cpufreq_quick_get(raw_smp_processor_id());
++ if (!khz)
++ khz = tsc_khz;
++ __this_cpu_write(cpu_tsc_khz, khz);
++}
++
++static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
++ void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ struct kvm *kvm;
++ struct kvm_vcpu *vcpu;
++ int i, send_ipi = 0;
++
++ /*
++ * We allow guests to temporarily run on slowing clocks,
++ * provided we notify them after, or to run on accelerating
++ * clocks, provided we notify them before. Thus time never
++ * goes backwards.
++ *
++ * However, we have a problem. We can't atomically update
++ * the frequency of a given CPU from this function; it is
++ * merely a notifier, which can be called from any CPU.
++ * Changing the TSC frequency at arbitrary points in time
++ * requires a recomputation of local variables related to
++ * the TSC for each VCPU. We must flag these local variables
++ * to be updated and be sure the update takes place with the
++ * new frequency before any guests proceed.
++ *
++ * Unfortunately, the combination of hotplug CPU and frequency
++ * change creates an intractable locking scenario; the order
++ * of when these callouts happen is undefined with respect to
++ * CPU hotplug, and they can race with each other. As such,
++ * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
++ * undefined; you can actually have a CPU frequency change take
++ * place in between the computation of X and the setting of the
++ * variable. To protect against this problem, all updates of
++ * the per_cpu tsc_khz variable are done in an interrupt
++ * protected IPI, and all callers wishing to update the value
++ * must wait for a synchronous IPI to complete (which is trivial
++ * if the caller is on the CPU already). This establishes the
++ * necessary total order on variable updates.
++ *
++ * Note that because a guest time update may take place
++ * anytime after the setting of the VCPU's request bit, the
++ * correct TSC value must be set before the request. However,
++ * to ensure the update actually makes it to any guest which
++ * starts running in hardware virtualization between the set
++ * and the acquisition of the spinlock, we must also ping the
++ * CPU after setting the request bit.
++ *
++ */
++
++ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
++ return 0;
++ if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
++ return 0;
++
++ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
++
++ spin_lock(&kvm_lock);
++ list_for_each_entry(kvm, &vm_list, vm_list) {
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ if (vcpu->cpu != freq->cpu)
++ continue;
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++ if (vcpu->cpu != smp_processor_id())
++ send_ipi = 1;
++ }
++ }
++ spin_unlock(&kvm_lock);
++
++ if (freq->old < freq->new && send_ipi) {
++ /*
++ * We upscale the frequency. We must make sure the guest
++ * doesn't see old kvmclock values while running with
++ * the new frequency; otherwise we risk the guest seeing
++ * time go backwards.
++ *
++ * In case we update the frequency for another cpu
++ * (which might be in guest context) send an interrupt
++ * to kick the cpu out of guest context. Next time
++ * guest context is entered kvmclock will be updated,
++ * so the guest will not see stale values.
++ */
++ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
++ }
++ return 0;
++}
++
++static struct notifier_block kvmclock_cpufreq_notifier_block = {
++ .notifier_call = kvmclock_cpufreq_notifier
++};
++
++static int kvmclock_cpu_notifier(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++
++ switch (action) {
++ case CPU_ONLINE:
++ case CPU_DOWN_FAILED:
++ smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
++ break;
++ case CPU_DOWN_PREPARE:
++ smp_call_function_single(cpu, tsc_bad, NULL, 1);
++ break;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block kvmclock_cpu_notifier_block = {
++ .notifier_call = kvmclock_cpu_notifier,
++ .priority = -INT_MAX
++};
++
++static void kvm_timer_init(void)
++{
++ int cpu;
++
++ max_tsc_khz = tsc_khz;
++
++ cpu_notifier_register_begin();
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++#ifdef CONFIG_CPU_FREQ
++ struct cpufreq_policy policy;
++ memset(&policy, 0, sizeof(policy));
++ cpu = get_cpu();
++ cpufreq_get_policy(&policy, cpu);
++ if (policy.cpuinfo.max_freq)
++ max_tsc_khz = policy.cpuinfo.max_freq;
++ put_cpu();
++#endif
++ cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ }
++ pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
++ for_each_online_cpu(cpu)
++ smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
++
++ __register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
++ cpu_notifier_register_done();
++}
++
++static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
++
++int kvm_is_in_guest(void)
++{
++ return __this_cpu_read(current_vcpu) != NULL;
++}
++
++static int kvm_is_user_mode(void)
++{
++ int user_mode = 3;
++
++ if (__this_cpu_read(current_vcpu))
++ user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
++
++ return user_mode != 0;
++}
++
++static unsigned long kvm_get_guest_ip(void)
++{
++ unsigned long ip = 0;
++
++ if (__this_cpu_read(current_vcpu))
++ ip = kvm_rip_read(__this_cpu_read(current_vcpu));
++
++ return ip;
++}
++
++static struct perf_guest_info_callbacks kvm_guest_cbs = {
++ .is_in_guest = kvm_is_in_guest,
++ .is_user_mode = kvm_is_user_mode,
++ .get_guest_ip = kvm_get_guest_ip,
++};
++
++void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
++{
++ __this_cpu_write(current_vcpu, vcpu);
++}
++EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
++
++void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
++{
++ __this_cpu_write(current_vcpu, NULL);
++}
++EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
++
++static void kvm_set_mmio_spte_mask(void)
++{
++ u64 mask;
++ int maxphyaddr = boot_cpu_data.x86_phys_bits;
++
++ /*
++ * Set the reserved bits and the present bit of a paging-structure
++ * entry to generate a page fault with PFER.RSV = 1.
++ */
++ /* Mask the reserved physical address bits. */
++ mask = rsvd_bits(maxphyaddr, 51);
++
++ /* Bit 62 is always reserved for a 32-bit host. */
++ mask |= 0x3ull << 62;
++
++ /* Set the present bit. */
++ mask |= 1ull;
++
++#ifdef CONFIG_X86_64
++ /*
++ * If the reserved bit is not supported, clear the present bit to
++ * disable MMIO page faults.
++ */
++ if (maxphyaddr == 52)
++ mask &= ~1ull;
++#endif
++
++ kvm_mmu_set_mmio_spte_mask(mask);
++}
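
rsvd_bits() (defined in KVM's mmu header) produces a mask with bits s..e inclusive set; shown here as a sketch together with a worked value:

    static inline u64 rsvd_bits_sketch(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }
    /* maxphyaddr = 40: rsvd_bits_sketch(40, 51) == 0x000fff0000000000,
     * so any of bits 40-51 set in an SPTE marks it reserved */
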
++
++#ifdef CONFIG_X86_64
++static void pvclock_gtod_update_fn(struct work_struct *work)
++{
++ struct kvm *kvm;
++
++ struct kvm_vcpu *vcpu;
++ int i;
++
++ spin_lock(&kvm_lock);
++ list_for_each_entry(kvm, &vm_list, vm_list)
++ kvm_for_each_vcpu(i, vcpu, kvm)
++ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
++ atomic_set(&kvm_guest_has_master_clock, 0);
++ spin_unlock(&kvm_lock);
++}
++
++static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
++
++/*
++ * Notification about pvclock gtod data update.
++ */
++static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
++ void *priv)
++{
++ struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
++ struct timekeeper *tk = priv;
++
++ update_pvclock_gtod(tk);
++
++ /* disable the master clock if the host does not trust, or does not
++ * use, the TSC clocksource
++ */
++ if (gtod->clock.vclock_mode != VCLOCK_TSC &&
++ atomic_read(&kvm_guest_has_master_clock) != 0)
++ queue_work(system_long_wq, &pvclock_gtod_work);
++
++ return 0;
++}
++
++static struct notifier_block pvclock_gtod_notifier = {
++ .notifier_call = pvclock_gtod_notify,
++};
++#endif
++
++int kvm_arch_init(void *opaque)
++{
++ int r;
++ struct kvm_x86_ops *ops = opaque;
++
++ if (kvm_x86_ops) {
++ printk(KERN_ERR "kvm: already loaded the other module\n");
++ r = -EEXIST;
++ goto out;
++ }
++
++ if (!ops->cpu_has_kvm_support()) {
++ printk(KERN_ERR "kvm: no hardware support\n");
++ r = -EOPNOTSUPP;
++ goto out;
++ }
++ if (ops->disabled_by_bios()) {
++ printk(KERN_ERR "kvm: disabled by bios\n");
++ r = -EOPNOTSUPP;
++ goto out;
++ }
++
++ r = -ENOMEM;
++ shared_msrs = alloc_percpu(struct kvm_shared_msrs);
++ if (!shared_msrs) {
++ printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
++ goto out;
++ }
++
++ r = kvm_mmu_module_init();
++ if (r)
++ goto out_free_percpu;
++
++ kvm_set_mmio_spte_mask();
++
++ kvm_x86_ops = ops;
++ kvm_init_msr_list();
++
++ kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
++ PT_DIRTY_MASK, PT64_NX_MASK, 0);
++
++ kvm_timer_init();
++
++ perf_register_guest_info_callbacks(&kvm_guest_cbs);
++
++ if (cpu_has_xsave)
++ host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
++
++ kvm_lapic_init();
++#ifdef CONFIG_X86_64
++ pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
++#endif
++
++ return 0;
++
++out_free_percpu:
++ free_percpu(shared_msrs);
++out:
++ return r;
++}
++
++void kvm_arch_exit(void)
++{
++ perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
++
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
++ cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++ unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
++#ifdef CONFIG_X86_64
++ pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
++#endif
++ kvm_x86_ops = NULL;
++ kvm_mmu_module_exit();
++ free_percpu(shared_msrs);
++}
++
++int kvm_emulate_halt(struct kvm_vcpu *vcpu)
++{
++ ++vcpu->stat.halt_exits;
++ if (irqchip_in_kernel(vcpu->kvm)) {
++ vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
++ return 1;
++ } else {
++ vcpu->run->exit_reason = KVM_EXIT_HLT;
++ return 0;
++ }
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_halt);
++
++int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
++{
++ u64 param, ingpa, outgpa, ret;
++ uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
++ bool fast, longmode;
++
++ /*
++ * A hypercall generates #UD from non-zero CPL or real mode,
++ * per the Hyper-V spec.
++ */
++ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
++ kvm_queue_exception(vcpu, UD_VECTOR);
++ return 0;
++ }
++
++ longmode = is_64_bit_mode(vcpu);
++
++ if (!longmode) {
++ param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
++ (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
++ ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
++ (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
++ outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
++ (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
++ }
++#ifdef CONFIG_X86_64
++ else {
++ param = kvm_register_read(vcpu, VCPU_REGS_RCX);
++ ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
++ outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
++ }
++#endif
++
++ code = param & 0xffff;
++ fast = (param >> 16) & 0x1;
++ rep_cnt = (param >> 32) & 0xfff;
++ rep_idx = (param >> 48) & 0xfff;
++
++ trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
++
++ switch (code) {
++ case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
++ kvm_vcpu_on_spin(vcpu);
++ break;
++ default:
++ res = HV_STATUS_INVALID_HYPERCALL_CODE;
++ break;
++ }
++
++ ret = res | (((u64)rep_done & 0xfff) << 32);
++ if (longmode) {
++ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++ } else {
++ kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
++ kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
++ }
++
++ return 1;
++}
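
The Hyper-V hypercall input value packs its fields exactly as masked out above; a worked example (the input value itself is hypothetical):

    u64 param   = 0x0003000200010008ULL;
    u16 code    = param & 0xffff;         /* 0x0008 */
    bool fast   = (param >> 16) & 0x1;    /* 1: register-based arguments */
    u16 rep_cnt = (param >> 32) & 0xfff;  /* 2 */
    u16 rep_idx = (param >> 48) & 0xfff;  /* 3 */
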
++
++/*
++ * kvm_pv_kick_cpu_op: Kick a vcpu.
++ *
++ * @apicid - apicid of vcpu to be kicked.
++ */
++static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
++{
++ struct kvm_lapic_irq lapic_irq;
++
++ lapic_irq.shorthand = 0;
++ lapic_irq.dest_mode = 0;
++ lapic_irq.dest_id = apicid;
++
++ lapic_irq.delivery_mode = APIC_DM_REMRD;
++ kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
++}
++
++int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
++{
++ unsigned long nr, a0, a1, a2, a3, ret;
++ int op_64_bit, r = 1;
++
++ if (kvm_hv_hypercall_enabled(vcpu->kvm))
++ return kvm_hv_hypercall(vcpu);
++
++ nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
++ a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
++ a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
++ a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
++ a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
++
++ trace_kvm_hypercall(nr, a0, a1, a2, a3);
++
++ op_64_bit = is_64_bit_mode(vcpu);
++ if (!op_64_bit) {
++ nr &= 0xFFFFFFFF;
++ a0 &= 0xFFFFFFFF;
++ a1 &= 0xFFFFFFFF;
++ a2 &= 0xFFFFFFFF;
++ a3 &= 0xFFFFFFFF;
++ }
++
++ if (kvm_x86_ops->get_cpl(vcpu) != 0) {
++ ret = -KVM_EPERM;
++ goto out;
++ }
++
++ switch (nr) {
++ case KVM_HC_VAPIC_POLL_IRQ:
++ ret = 0;
++ break;
++ case KVM_HC_KICK_CPU:
++ kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
++ ret = 0;
++ break;
++ default:
++ ret = -KVM_ENOSYS;
++ break;
++ }
++out:
++ if (!op_64_bit)
++ ret = (u32)ret;
++ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++ ++vcpu->stat.hypercalls;
++ return r;
++}
++EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
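
On the guest side, the ABI consumed here is: hypercall number in RAX, arguments in RBX, RCX, RDX, RSI, result back in RAX. A hedged two-argument sketch (it mirrors kvm_hypercall2() from kvm_para.h; "vmcall" assumes an Intel CPU, AMD guests use "vmmcall"):

    static inline long kvm_hypercall2_sketch(unsigned int nr,
                                             unsigned long p1,
                                             unsigned long p2)
    {
            long ret;

            asm volatile("vmcall"
                         : "=a"(ret)
                         : "a"(nr), "b"(p1), "c"(p2)
                         : "memory");
            return ret;
    }
    /* e.g. kvm_hypercall2_sketch(KVM_HC_KICK_CPU, 0, apicid); */
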
++
++static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
++{
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++ char instruction[3];
++ unsigned long rip = kvm_rip_read(vcpu);
++
++ kvm_x86_ops->patch_hypercall(vcpu, instruction);
++
++ return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
++}
++
++/*
++ * Check if userspace requested an interrupt window, and that the
++ * interrupt window is open.
++ *
++ * No need to exit to userspace if we already have an interrupt queued.
++ */
++static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
++{
++ return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
++ vcpu->run->request_interrupt_window &&
++ kvm_arch_interrupt_allowed(vcpu));
++}
++
++static void post_kvm_run_save(struct kvm_vcpu *vcpu)
++{
++ struct kvm_run *kvm_run = vcpu->run;
++
++ kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
++ kvm_run->cr8 = kvm_get_cr8(vcpu);
++ kvm_run->apic_base = kvm_get_apic_base(vcpu);
++ if (irqchip_in_kernel(vcpu->kvm))
++ kvm_run->ready_for_interrupt_injection = 1;
++ else
++ kvm_run->ready_for_interrupt_injection =
++ kvm_arch_interrupt_allowed(vcpu) &&
++ !kvm_cpu_has_interrupt(vcpu) &&
++ !kvm_event_needs_reinjection(vcpu);
++}
++
++static void update_cr8_intercept(struct kvm_vcpu *vcpu)
++{
++ int max_irr, tpr;
++
++ if (!kvm_x86_ops->update_cr8_intercept)
++ return;
++
++ if (!vcpu->arch.apic)
++ return;
++
++ if (!vcpu->arch.apic->vapic_addr)
++ max_irr = kvm_lapic_find_highest_irr(vcpu);
++ else
++ max_irr = -1;
++
++ if (max_irr != -1)
++ max_irr >>= 4;
++
++ tpr = kvm_lapic_get_cr8(vcpu);
++
++ kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
++}
++
++static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
++{
++ int r;
++
++ /* try to reinject previous events if any */
++ if (vcpu->arch.exception.pending) {
++ trace_kvm_inj_exception(vcpu->arch.exception.nr,
++ vcpu->arch.exception.has_error_code,
++ vcpu->arch.exception.error_code);
++
++ if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
++ __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
++ X86_EFLAGS_RF);
++
++ kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
++ vcpu->arch.exception.has_error_code,
++ vcpu->arch.exception.error_code,
++ vcpu->arch.exception.reinject);
++ return 0;
++ }
++
++ if (vcpu->arch.nmi_injected) {
++ kvm_x86_ops->set_nmi(vcpu);
++ return 0;
++ }
++
++ if (vcpu->arch.interrupt.pending) {
++ kvm_x86_ops->set_irq(vcpu);
++ return 0;
++ }
++
++ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
++ r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
++ if (r != 0)
++ return r;
++ }
++
++ /* try to inject new event if pending */
++ if (vcpu->arch.nmi_pending) {
++ if (kvm_x86_ops->nmi_allowed(vcpu)) {
++ --vcpu->arch.nmi_pending;
++ vcpu->arch.nmi_injected = true;
++ kvm_x86_ops->set_nmi(vcpu);
++ }
++ } else if (kvm_cpu_has_injectable_intr(vcpu)) {
++ /*
++ * Because interrupts can be injected asynchronously, we are
++ * calling check_nested_events again here to avoid a race condition.
++ * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
++ * proposal and current concerns. Perhaps we should be setting
++ * KVM_REQ_EVENT only on certain events and not unconditionally?
++ */
++ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
++ r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
++ if (r != 0)
++ return r;
++ }
++ if (kvm_x86_ops->interrupt_allowed(vcpu)) {
++ kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
++ false);
++ kvm_x86_ops->set_irq(vcpu);
++ }
++ }
++ return 0;
++}
++
++static void process_nmi(struct kvm_vcpu *vcpu)
++{
++ unsigned limit = 2;
++
++ /*
++ * x86 is limited to one NMI running, and one NMI pending after it.
++ * If an NMI is already in progress, limit further NMIs to just one.
++ * Otherwise, allow two (and we'll inject the first one immediately).
++ */
++ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
++ limit = 1;
++
++ vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
++ vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++}
++
++static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
++{
++ u64 eoi_exit_bitmap[4];
++ u32 tmr[8];
++
++ if (!kvm_apic_hw_enabled(vcpu->arch.apic))
++ return;
++
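++	/* Both arrays span 256 bits (32 bytes): one bit per interrupt vector. */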
++ memset(eoi_exit_bitmap, 0, 32);
++ memset(tmr, 0, 32);
++
++ kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr);
++ kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
++ kvm_apic_update_tmr(vcpu, tmr);
++}
++
++static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
++{
++ ++vcpu->stat.tlb_flush;
++ kvm_x86_ops->tlb_flush(vcpu);
++}
++
++void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
++{
++ struct page *page = NULL;
++
++ if (!irqchip_in_kernel(vcpu->kvm))
++ return;
++
++ if (!kvm_x86_ops->set_apic_access_page_addr)
++ return;
++
++ page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
++ kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
++
++	/*
++	 * Do not pin the apic access page in memory; the MMU notifier
++	 * will call us again if it is migrated or swapped out.
++	 */
++ put_page(page);
++}
++EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
++
++void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
++ unsigned long address)
++{
++	/*
++	 * The physical address of the apic access page is stored in the VMCS.
++	 * Update it when it becomes invalid.
++	 */
++ if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
++ kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
++}
++
++/*
++ * Returns 1 to let __vcpu_run() continue the guest execution loop without
++ * exiting to userspace. Otherwise, the value will be returned to
++ * userspace.
++ */
++static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
++{
++ int r;
++ bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
++ vcpu->run->request_interrupt_window;
++ bool req_immediate_exit = false;
++
++ if (vcpu->requests) {
++ if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
++ kvm_mmu_unload(vcpu);
++ if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
++ __kvm_migrate_timers(vcpu);
++ if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
++ kvm_gen_update_masterclock(vcpu->kvm);
++ if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
++ kvm_gen_kvmclock_update(vcpu);
++ if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
++ r = kvm_guest_time_update(vcpu);
++ if (unlikely(r))
++ goto out;
++ }
++ if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
++ kvm_mmu_sync_roots(vcpu);
++ if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
++ kvm_vcpu_flush_tlb(vcpu);
++ if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
++ vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
++ r = 0;
++ goto out;
++ }
++ if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
++ vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
++ r = 0;
++ goto out;
++ }
++ if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
++ vcpu->fpu_active = 0;
++ kvm_x86_ops->fpu_deactivate(vcpu);
++ }
++ if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
++ /* Page is swapped out. Do synthetic halt */
++ vcpu->arch.apf.halted = true;
++ r = 1;
++ goto out;
++ }
++ if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
++ record_steal_time(vcpu);
++ if (kvm_check_request(KVM_REQ_NMI, vcpu))
++ process_nmi(vcpu);
++ if (kvm_check_request(KVM_REQ_PMU, vcpu))
++ kvm_handle_pmu_event(vcpu);
++ if (kvm_check_request(KVM_REQ_PMI, vcpu))
++ kvm_deliver_pmi(vcpu);
++ if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
++ vcpu_scan_ioapic(vcpu);
++ if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
++ kvm_vcpu_reload_apic_access_page(vcpu);
++ }
++
++ if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
++ kvm_apic_accept_events(vcpu);
++ if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
++ r = 1;
++ goto out;
++ }
++
++ if (inject_pending_event(vcpu, req_int_win) != 0)
++ req_immediate_exit = true;
++ /* enable NMI/IRQ window open exits if needed */
++ else if (vcpu->arch.nmi_pending)
++ kvm_x86_ops->enable_nmi_window(vcpu);
++ else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
++ kvm_x86_ops->enable_irq_window(vcpu);
++
++ if (kvm_lapic_enabled(vcpu)) {
++ /*
++ * Update architecture specific hints for APIC
++ * virtual interrupt delivery.
++ */
++ if (kvm_x86_ops->hwapic_irr_update)
++ kvm_x86_ops->hwapic_irr_update(vcpu,
++ kvm_lapic_find_highest_irr(vcpu));
++ update_cr8_intercept(vcpu);
++ kvm_lapic_sync_to_vapic(vcpu);
++ }
++ }
++
++ r = kvm_mmu_reload(vcpu);
++	if (unlikely(r))
++		goto cancel_injection;
++
++ preempt_disable();
++
++ kvm_x86_ops->prepare_guest_switch(vcpu);
++ if (vcpu->fpu_active)
++ kvm_load_guest_fpu(vcpu);
++ kvm_load_guest_xcr0(vcpu);
++
++ vcpu->mode = IN_GUEST_MODE;
++
++ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
++
++	/*
++	 * We should set ->mode before checking ->requests;
++	 * see the comment in make_all_cpus_request.
++	 */
++ smp_mb__after_srcu_read_unlock();
++
++ local_irq_disable();
++
++ if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
++ || need_resched() || signal_pending(current)) {
++ vcpu->mode = OUTSIDE_GUEST_MODE;
++ smp_wmb();
++ local_irq_enable();
++ preempt_enable();
++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
++ r = 1;
++ goto cancel_injection;
++ }
++
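++	/*
++	 * A reschedule IPI sent to ourselves while interrupts are disabled
++	 * stays pending and forces a vmexit right after the next vmentry.
++	 */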
++ if (req_immediate_exit)
++ smp_send_reschedule(vcpu->cpu);
++
++ kvm_guest_enter();
++
++ if (unlikely(vcpu->arch.switch_db_regs)) {
++ set_debugreg(0, 7);
++ set_debugreg(vcpu->arch.eff_db[0], 0);
++ set_debugreg(vcpu->arch.eff_db[1], 1);
++ set_debugreg(vcpu->arch.eff_db[2], 2);
++ set_debugreg(vcpu->arch.eff_db[3], 3);
++ set_debugreg(vcpu->arch.dr6, 6);
++ }
++
++ trace_kvm_entry(vcpu->vcpu_id);
++ kvm_x86_ops->run(vcpu);
++
++	/*
++	 * Do this here before restoring debug registers on the host. Since
++	 * we do this before handling the vmexit, a DR access vmexit can
++	 * (a) read the correct value of the debug registers and (b) set
++	 * KVM_DEBUGREG_WONT_EXIT again.
++	 */
++ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
++ int i;
++
++ WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
++ kvm_x86_ops->sync_dirty_debug_regs(vcpu);
++ for (i = 0; i < KVM_NR_DB_REGS; i++)
++ vcpu->arch.eff_db[i] = vcpu->arch.db[i];
++ }
++
++ /*
++ * If the guest has used debug registers, at least dr7
++ * will be disabled while returning to the host.
++ * If we don't have active breakpoints in the host, we don't
++ * care about the messed up debug address registers. But if
++ * we have some of them active, restore the old state.
++ */
++ if (hw_breakpoint_active())
++ hw_breakpoint_restore();
++
++ vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
++ native_read_tsc());
++
++ vcpu->mode = OUTSIDE_GUEST_MODE;
++ smp_wmb();
++
++ /* Interrupt is enabled by handle_external_intr() */
++ kvm_x86_ops->handle_external_intr(vcpu);
++
++ ++vcpu->stat.exits;
++
++ /*
++ * We must have an instruction between local_irq_enable() and
++ * kvm_guest_exit(), so the timer interrupt isn't delayed by
++ * the interrupt shadow. The stat.exits increment will do nicely.
++ * But we need to prevent reordering, hence this barrier():
++ */
++ barrier();
++
++ kvm_guest_exit();
++
++ preempt_enable();
++
++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
++
++ /*
++ * Profile KVM exit RIPs:
++ */
++ if (unlikely(prof_on == KVM_PROFILING)) {
++ unsigned long rip = kvm_rip_read(vcpu);
++ profile_hit(KVM_PROFILING, (void *)rip);
++ }
++
++ if (unlikely(vcpu->arch.tsc_always_catchup))
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++
++ if (vcpu->arch.apic_attention)
++ kvm_lapic_sync_from_vapic(vcpu);
++
++ r = kvm_x86_ops->handle_exit(vcpu);
++ return r;
++
++cancel_injection:
++ kvm_x86_ops->cancel_injection(vcpu);
++ if (unlikely(vcpu->arch.apic_attention))
++ kvm_lapic_sync_from_vapic(vcpu);
++out:
++ return r;
++}
++
++
++static int __vcpu_run(struct kvm_vcpu *vcpu)
++{
++ int r;
++ struct kvm *kvm = vcpu->kvm;
++
++ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
++
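++	/* By convention, r > 0 keeps the run loop going; r <= 0 returns to userspace. */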
++ r = 1;
++ while (r > 0) {
++ if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
++ !vcpu->arch.apf.halted)
++ r = vcpu_enter_guest(vcpu);
++ else {
++ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
++ kvm_vcpu_block(vcpu);
++ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
++ if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
++ kvm_apic_accept_events(vcpu);
++				switch (vcpu->arch.mp_state) {
++ case KVM_MP_STATE_HALTED:
++ vcpu->arch.pv.pv_unhalted = false;
++ vcpu->arch.mp_state =
++ KVM_MP_STATE_RUNNABLE;
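++					/* fall through */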
++ case KVM_MP_STATE_RUNNABLE:
++ vcpu->arch.apf.halted = false;
++ break;
++ case KVM_MP_STATE_INIT_RECEIVED:
++ break;
++ default:
++ r = -EINTR;
++ break;
++ }
++ }
++ }
++
++ if (r <= 0)
++ break;
++
++ clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
++ if (kvm_cpu_has_pending_timer(vcpu))
++ kvm_inject_pending_timer_irqs(vcpu);
++
++ if (dm_request_for_irq_injection(vcpu)) {
++ r = -EINTR;
++ vcpu->run->exit_reason = KVM_EXIT_INTR;
++ ++vcpu->stat.request_irq_exits;
++ }
++
++ kvm_check_async_pf_completion(vcpu);
++
++ if (signal_pending(current)) {
++ r = -EINTR;
++ vcpu->run->exit_reason = KVM_EXIT_INTR;
++ ++vcpu->stat.signal_exits;
++ }
++ if (need_resched()) {
++ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
++ cond_resched();
++ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
++ }
++ }
++
++ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
++
++ return r;
++}
++
++static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
++{
++ int r;
++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
++ r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
++ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
++ if (r != EMULATE_DONE)
++ return 0;
++ return 1;
++}
++
++static int complete_emulated_pio(struct kvm_vcpu *vcpu)
++{
++ BUG_ON(!vcpu->arch.pio.count);
++
++ return complete_emulated_io(vcpu);
++}
++
++/*
++ * Implements the following, as a state machine:
++ *
++ * read:
++ * for each fragment
++ * for each mmio piece in the fragment
++ * write gpa, len
++ * exit
++ * copy data
++ * execute insn
++ *
++ * write:
++ * for each fragment
++ * for each mmio piece in the fragment
++ * write gpa, len
++ * copy data
++ * exit
++ */
++static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
++{
++ struct kvm_run *run = vcpu->run;
++ struct kvm_mmio_fragment *frag;
++ unsigned len;
++
++ BUG_ON(!vcpu->mmio_needed);
++
++ /* Complete previous fragment */
++ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
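++	/* run->mmio.data carries at most 8 bytes, so longer fragments complete in 8-byte pieces. */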
++ len = min(8u, frag->len);
++ if (!vcpu->mmio_is_write)
++ memcpy(frag->data, run->mmio.data, len);
++
++ if (frag->len <= 8) {
++ /* Switch to the next fragment. */
++ frag++;
++ vcpu->mmio_cur_fragment++;
++ } else {
++ /* Go forward to the next mmio piece. */
++ frag->data += len;
++ frag->gpa += len;
++ frag->len -= len;
++ }
++
++ if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
++ vcpu->mmio_needed = 0;
++
++ /* FIXME: return into emulator if single-stepping. */
++ if (vcpu->mmio_is_write)
++ return 1;
++ vcpu->mmio_read_completed = 1;
++ return complete_emulated_io(vcpu);
++ }
++
++ run->exit_reason = KVM_EXIT_MMIO;
++ run->mmio.phys_addr = frag->gpa;
++ if (vcpu->mmio_is_write)
++ memcpy(run->mmio.data, frag->data, min(8u, frag->len));
++ run->mmio.len = min(8u, frag->len);
++ run->mmio.is_write = vcpu->mmio_is_write;
++ vcpu->arch.complete_userspace_io = complete_emulated_mmio;
++ return 0;
++}
++
++
++int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
++{
++ int r;
++ sigset_t sigsaved;
++
++ if (!tsk_used_math(current) && init_fpu(current))
++ return -ENOMEM;
++
++ if (vcpu->sigset_active)
++ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
++
++ if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
++ kvm_vcpu_block(vcpu);
++ kvm_apic_accept_events(vcpu);
++ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
++ r = -EAGAIN;
++ goto out;
++ }
++
++ /* re-sync apic's tpr */
++ if (!irqchip_in_kernel(vcpu->kvm)) {
++ if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
++ r = -EINVAL;
++ goto out;
++ }
++ }
++
++ if (unlikely(vcpu->arch.complete_userspace_io)) {
++ int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
++ vcpu->arch.complete_userspace_io = NULL;
++ r = cui(vcpu);
++ if (r <= 0)
++ goto out;
++ } else
++ WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
++
++ r = __vcpu_run(vcpu);
++
++out:
++ post_kvm_run_save(vcpu);
++ if (vcpu->sigset_active)
++ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
++
++ return r;
++}
++
++int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
++{
++ if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
++ /*
++ * We are here if userspace calls get_regs() in the middle of
++		 * instruction emulation. Register state needs to be copied
++		 * back from the emulation context to the vcpu. Userspace
++		 * shouldn't usually do that, but some badly designed PV
++		 * devices (the vmware backdoor interface) need this to work.
++ */
++ emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
++ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
++ }
++ regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
++ regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
++ regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
++ regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
++ regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
++ regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
++ regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
++ regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
++#ifdef CONFIG_X86_64
++ regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
++ regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
++ regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
++ regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
++ regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
++ regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
++ regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
++ regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
++#endif
++
++ regs->rip = kvm_rip_read(vcpu);
++ regs->rflags = kvm_get_rflags(vcpu);
++
++ return 0;
++}
++
++int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
++{
++ vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
++ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
++
++ kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
++ kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
++ kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
++ kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
++ kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
++ kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
++ kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
++ kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
++#ifdef CONFIG_X86_64
++ kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
++ kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
++ kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
++ kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
++ kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
++ kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
++ kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
++ kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
++#endif
++
++ kvm_rip_write(vcpu, regs->rip);
++ kvm_set_rflags(vcpu, regs->rflags);
++
++ vcpu->arch.exception.pending = false;
++
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++
++ return 0;
++}
++
++void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
++{
++ struct kvm_segment cs;
++
++ kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
++ *db = cs.db;
++ *l = cs.l;
++}
++EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
++
++int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
++ struct kvm_sregs *sregs)
++{
++ struct desc_ptr dt;
++
++ kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
++ kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
++ kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
++ kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
++ kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
++ kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
++
++ kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
++ kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
++
++ kvm_x86_ops->get_idt(vcpu, &dt);
++ sregs->idt.limit = dt.size;
++ sregs->idt.base = dt.address;
++ kvm_x86_ops->get_gdt(vcpu, &dt);
++ sregs->gdt.limit = dt.size;
++ sregs->gdt.base = dt.address;
++
++ sregs->cr0 = kvm_read_cr0(vcpu);
++ sregs->cr2 = vcpu->arch.cr2;
++ sregs->cr3 = kvm_read_cr3(vcpu);
++ sregs->cr4 = kvm_read_cr4(vcpu);
++ sregs->cr8 = kvm_get_cr8(vcpu);
++ sregs->efer = vcpu->arch.efer;
++ sregs->apic_base = kvm_get_apic_base(vcpu);
++
++ memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
++
++ if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
++ set_bit(vcpu->arch.interrupt.nr,
++ (unsigned long *)sregs->interrupt_bitmap);
++
++ return 0;
++}
++
++int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
++ struct kvm_mp_state *mp_state)
++{
++ kvm_apic_accept_events(vcpu);
++ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
++ vcpu->arch.pv.pv_unhalted)
++ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
++ else
++ mp_state->mp_state = vcpu->arch.mp_state;
++
++ return 0;
++}
++
++int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
++ struct kvm_mp_state *mp_state)
++{
++ if (!kvm_vcpu_has_lapic(vcpu) &&
++ mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
++ return -EINVAL;
++
++ if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
++ vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
++ set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
++ } else
++ vcpu->arch.mp_state = mp_state->mp_state;
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ return 0;
++}
++
++int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
++ int reason, bool has_error_code, u32 error_code)
++{
++ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
++ int ret;
++
++ init_emulate_ctxt(vcpu);
++
++ ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
++ has_error_code, error_code);
++
++ if (ret)
++ return EMULATE_FAIL;
++
++ kvm_rip_write(vcpu, ctxt->eip);
++ kvm_set_rflags(vcpu, ctxt->eflags);
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ return EMULATE_DONE;
++}
++EXPORT_SYMBOL_GPL(kvm_task_switch);
++
++int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
++ struct kvm_sregs *sregs)
++{
++ struct msr_data apic_base_msr;
++ int mmu_reset_needed = 0;
++ int pending_vec, max_bits, idx;
++ struct desc_ptr dt;
++
++ if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
++ return -EINVAL;
++
++ dt.size = sregs->idt.limit;
++ dt.address = sregs->idt.base;
++ kvm_x86_ops->set_idt(vcpu, &dt);
++ dt.size = sregs->gdt.limit;
++ dt.address = sregs->gdt.base;
++ kvm_x86_ops->set_gdt(vcpu, &dt);
++
++ vcpu->arch.cr2 = sregs->cr2;
++ mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
++ vcpu->arch.cr3 = sregs->cr3;
++ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
++
++ kvm_set_cr8(vcpu, sregs->cr8);
++
++ mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
++ kvm_x86_ops->set_efer(vcpu, sregs->efer);
++ apic_base_msr.data = sregs->apic_base;
++ apic_base_msr.host_initiated = true;
++ kvm_set_apic_base(vcpu, &apic_base_msr);
++
++ mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
++ kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
++ vcpu->arch.cr0 = sregs->cr0;
++
++ mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
++ kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
++ if (sregs->cr4 & X86_CR4_OSXSAVE)
++ kvm_update_cpuid(vcpu);
++
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
++ if (!is_long_mode(vcpu) && is_pae(vcpu)) {
++ load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
++ mmu_reset_needed = 1;
++ }
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
++
++ if (mmu_reset_needed)
++ kvm_mmu_reset_context(vcpu);
++
++ max_bits = KVM_NR_INTERRUPTS;
++ pending_vec = find_first_bit(
++ (const unsigned long *)sregs->interrupt_bitmap, max_bits);
++ if (pending_vec < max_bits) {
++ kvm_queue_interrupt(vcpu, pending_vec, false);
++ pr_debug("Set back pending irq %d\n", pending_vec);
++ }
++
++ kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
++ kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
++ kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
++ kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
++ kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
++ kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
++
++ kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
++ kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
++
++ update_cr8_intercept(vcpu);
++
++ /* Older userspace won't unhalt the vcpu on reset. */
++ if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
++ sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
++ !is_protmode(vcpu))
++ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
++
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++
++ return 0;
++}
++
++int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
++ struct kvm_guest_debug *dbg)
++{
++ unsigned long rflags;
++ int i, r;
++
++ if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
++ r = -EBUSY;
++ if (vcpu->arch.exception.pending)
++ goto out;
++ if (dbg->control & KVM_GUESTDBG_INJECT_DB)
++ kvm_queue_exception(vcpu, DB_VECTOR);
++ else
++ kvm_queue_exception(vcpu, BP_VECTOR);
++ }
++
++ /*
++ * Read rflags as long as potentially injected trace flags are still
++ * filtered out.
++ */
++ rflags = kvm_get_rflags(vcpu);
++
++ vcpu->guest_debug = dbg->control;
++ if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
++ vcpu->guest_debug = 0;
++
++ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
++ for (i = 0; i < KVM_NR_DB_REGS; ++i)
++ vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
++ vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
++ } else {
++ for (i = 0; i < KVM_NR_DB_REGS; i++)
++ vcpu->arch.eff_db[i] = vcpu->arch.db[i];
++ }
++ kvm_update_dr7(vcpu);
++
++ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
++ vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
++ get_segment_base(vcpu, VCPU_SREG_CS);
++
++ /*
++ * Trigger an rflags update that will inject or remove the trace
++ * flags.
++ */
++ kvm_set_rflags(vcpu, rflags);
++
++ kvm_x86_ops->update_db_bp_intercept(vcpu);
++
++ r = 0;
++
++out:
++
++ return r;
++}
++
++/*
++ * Translate a guest virtual address to a guest physical address.
++ */
++int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
++ struct kvm_translation *tr)
++{
++ unsigned long vaddr = tr->linear_address;
++ gpa_t gpa;
++ int idx;
++
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
++ gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
++ tr->physical_address = gpa;
++ tr->valid = gpa != UNMAPPED_GVA;
++ tr->writeable = 1;
++ tr->usermode = 0;
++
++ return 0;
++}
++
++int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
++{
++ struct i387_fxsave_struct *fxsave =
++ &vcpu->arch.guest_fpu.state->fxsave;
++
++ memcpy(fpu->fpr, fxsave->st_space, 128);
++ fpu->fcw = fxsave->cwd;
++ fpu->fsw = fxsave->swd;
++ fpu->ftwx = fxsave->twd;
++ fpu->last_opcode = fxsave->fop;
++ fpu->last_ip = fxsave->rip;
++ fpu->last_dp = fxsave->rdp;
++ memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
++
++ return 0;
++}
++
++int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
++{
++ struct i387_fxsave_struct *fxsave =
++ &vcpu->arch.guest_fpu.state->fxsave;
++
++ memcpy(fxsave->st_space, fpu->fpr, 128);
++ fxsave->cwd = fpu->fcw;
++ fxsave->swd = fpu->fsw;
++ fxsave->twd = fpu->ftwx;
++ fxsave->fop = fpu->last_opcode;
++ fxsave->rip = fpu->last_ip;
++ fxsave->rdp = fpu->last_dp;
++ memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
++
++ return 0;
++}
++
++int fx_init(struct kvm_vcpu *vcpu)
++{
++ int err;
++
++ err = fpu_alloc(&vcpu->arch.guest_fpu);
++ if (err)
++ return err;
++
++ fpu_finit(&vcpu->arch.guest_fpu);
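++	/* XSAVES uses the compacted xsave format; advertise it in xcomp_bv. */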
++ if (cpu_has_xsaves)
++ vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
++ host_xcr0 | XSTATE_COMPACTION_ENABLED;
++
++ /*
++ * Ensure guest xcr0 is valid for loading
++ */
++ vcpu->arch.xcr0 = XSTATE_FP;
++
++ vcpu->arch.cr0 |= X86_CR0_ET;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(fx_init);
++
++static void fx_free(struct kvm_vcpu *vcpu)
++{
++ fpu_free(&vcpu->arch.guest_fpu);
++}
++
++void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
++{
++ if (vcpu->guest_fpu_loaded)
++ return;
++
++ /*
++	 * Restore all possible states in the guest, and assume the host
++	 * will use all available bits.
++	 * The guest xcr0 will be loaded later.
++ */
++ kvm_put_guest_xcr0(vcpu);
++ vcpu->guest_fpu_loaded = 1;
++ __kernel_fpu_begin();
++ fpu_restore_checking(&vcpu->arch.guest_fpu);
++ trace_kvm_fpu(1);
++}
++
++void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
++{
++ kvm_put_guest_xcr0(vcpu);
++
++ if (!vcpu->guest_fpu_loaded)
++ return;
++
++ vcpu->guest_fpu_loaded = 0;
++ fpu_save_init(&vcpu->arch.guest_fpu);
++ __kernel_fpu_end();
++ ++vcpu->stat.fpu_reload;
++ kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
++ trace_kvm_fpu(0);
++}
++
++void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
++{
++ kvmclock_reset(vcpu);
++
++ free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
++ fx_free(vcpu);
++ kvm_x86_ops->vcpu_free(vcpu);
++}
++
++struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
++ unsigned int id)
++{
++ if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
++ printk_once(KERN_WARNING
++ "kvm: SMP vm created on host with unstable TSC; "
++ "guest TSC will not be reliable\n");
++ return kvm_x86_ops->vcpu_create(kvm, id);
++}
++
++int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
++{
++ int r;
++
++ vcpu->arch.mtrr_state.have_fixed = 1;
++ r = vcpu_load(vcpu);
++ if (r)
++ return r;
++ kvm_vcpu_reset(vcpu);
++ kvm_mmu_setup(vcpu);
++ vcpu_put(vcpu);
++
++ return r;
++}
++
++int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
++{
++ int r;
++ struct msr_data msr;
++ struct kvm *kvm = vcpu->kvm;
++
++ r = vcpu_load(vcpu);
++ if (r)
++ return r;
++ msr.data = 0x0;
++ msr.index = MSR_IA32_TSC;
++ msr.host_initiated = true;
++ kvm_write_tsc(vcpu, &msr);
++ vcpu_put(vcpu);
++
++ schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
++ KVMCLOCK_SYNC_PERIOD);
++
++ return r;
++}
++
++void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
++{
++ int r;
++ vcpu->arch.apf.msr_val = 0;
++
++ r = vcpu_load(vcpu);
++ BUG_ON(r);
++ kvm_mmu_unload(vcpu);
++ vcpu_put(vcpu);
++
++ fx_free(vcpu);
++ kvm_x86_ops->vcpu_free(vcpu);
++}
++
++void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
++{
++ atomic_set(&vcpu->arch.nmi_queued, 0);
++ vcpu->arch.nmi_pending = 0;
++ vcpu->arch.nmi_injected = false;
++ kvm_clear_interrupt_queue(vcpu);
++ kvm_clear_exception_queue(vcpu);
++
++ memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
++ vcpu->arch.dr6 = DR6_INIT;
++ kvm_update_dr6(vcpu);
++ vcpu->arch.dr7 = DR7_FIXED_1;
++ kvm_update_dr7(vcpu);
++
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++ vcpu->arch.apf.msr_val = 0;
++ vcpu->arch.st.msr_val = 0;
++
++ kvmclock_reset(vcpu);
++
++ kvm_clear_async_pf_completion_queue(vcpu);
++ kvm_async_pf_hash_reset(vcpu);
++ vcpu->arch.apf.halted = false;
++
++ kvm_pmu_reset(vcpu);
++
++ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
++ vcpu->arch.regs_avail = ~0;
++ vcpu->arch.regs_dirty = ~0;
++
++ kvm_x86_ops->vcpu_reset(vcpu);
++}
++
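++/*
++ * The SIPI vector selects the real-mode start address: CS gets selector
++ * vector << 8 and base vector << 12, and rip is reset to 0.
++ */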
++void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
++{
++ struct kvm_segment cs;
++
++ kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
++ cs.selector = vector << 8;
++ cs.base = vector << 12;
++ kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
++ kvm_rip_write(vcpu, 0);
++}
++
++int kvm_arch_hardware_enable(void)
++{
++ struct kvm *kvm;
++ struct kvm_vcpu *vcpu;
++ int i;
++ int ret;
++ u64 local_tsc;
++ u64 max_tsc = 0;
++ bool stable, backwards_tsc = false;
++
++ kvm_shared_msr_cpu_online();
++ ret = kvm_x86_ops->hardware_enable();
++ if (ret != 0)
++ return ret;
++
++ local_tsc = native_read_tsc();
++ stable = !check_tsc_unstable();
++ list_for_each_entry(kvm, &vm_list, vm_list) {
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ if (!stable && vcpu->cpu == smp_processor_id())
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
++ if (stable && vcpu->arch.last_host_tsc > local_tsc) {
++ backwards_tsc = true;
++ if (vcpu->arch.last_host_tsc > max_tsc)
++ max_tsc = vcpu->arch.last_host_tsc;
++ }
++ }
++ }
++
++ /*
++ * Sometimes, even reliable TSCs go backwards. This happens on
++ * platforms that reset TSC during suspend or hibernate actions, but
++ * maintain synchronization. We must compensate. Fortunately, we can
++ * detect that condition here, which happens early in CPU bringup,
++ * before any KVM threads can be running. Unfortunately, we can't
++ * bring the TSCs fully up to date with real time, as we aren't yet far
++ * enough into CPU bringup that we know how much real time has actually
++ * elapsed; our helper function, get_kernel_ns() will be using boot
++ * variables that haven't been updated yet.
++ *
++ * So we simply find the maximum observed TSC above, then record the
++ * adjustment to TSC in each VCPU. When the VCPU later gets loaded,
++ * the adjustment will be applied. Note that we accumulate
++ * adjustments, in case multiple suspend cycles happen before some VCPU
++ * gets a chance to run again. In the event that no KVM threads get a
++ * chance to run, we will miss the entire elapsed period, as we'll have
++ * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
++ * lose cycle time. This isn't too big a deal, since the loss will be
++ * uniform across all VCPUs (not to mention the scenario is extremely
++ * unlikely). It is possible that a second hibernate recovery happens
++ * much faster than a first, causing the observed TSC here to be
++ * smaller; this would require additional padding adjustment, which is
++ * why we set last_host_tsc to the local tsc observed here.
++ *
++ * N.B. - this code below runs only on platforms with reliable TSC,
++ * as that is the only way backwards_tsc is set above. Also note
++ * that this runs for ALL vcpus, which is not a bug; all VCPUs should
++ * have the same delta_cyc adjustment applied if backwards_tsc
++ * is detected. Note further, this adjustment is only done once,
++ * as we reset last_host_tsc on all VCPUs to stop this from being
++ * called multiple times (one for each physical CPU bringup).
++ *
++ * Platforms with unreliable TSCs don't have to deal with this, they
++ * will be compensated by the logic in vcpu_load, which sets the TSC to
++ * catchup mode. This will catchup all VCPUs to real time, but cannot
++ * guarantee that they stay in perfect synchronization.
++ */
++ if (backwards_tsc) {
++ u64 delta_cyc = max_tsc - local_tsc;
++ backwards_tsc_observed = true;
++ list_for_each_entry(kvm, &vm_list, vm_list) {
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ vcpu->arch.tsc_offset_adjustment += delta_cyc;
++ vcpu->arch.last_host_tsc = local_tsc;
++ kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
++ }
++
++ /*
++			 * We have to disable TSC offset matching: if you were
++			 * booting a VM while issuing an S4 host suspend,
++			 * you may have some problems. Solving this issue is
++ * left as an exercise to the reader.
++ */
++ kvm->arch.last_tsc_nsec = 0;
++ kvm->arch.last_tsc_write = 0;
++ }
++
++ }
++ return 0;
++}
++
++void kvm_arch_hardware_disable(void)
++{
++ kvm_x86_ops->hardware_disable();
++ drop_user_return_notifiers();
++}
++
++int kvm_arch_hardware_setup(void)
++{
++ return kvm_x86_ops->hardware_setup();
++}
++
++void kvm_arch_hardware_unsetup(void)
++{
++ kvm_x86_ops->hardware_unsetup();
++}
++
++void kvm_arch_check_processor_compat(void *rtn)
++{
++ kvm_x86_ops->check_processor_compatibility(rtn);
++}
++
++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
++{
++ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
++}
++
++struct static_key kvm_no_apic_vcpu __read_mostly;
++
++int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
++{
++ struct page *page;
++ struct kvm *kvm;
++ int r;
++
++ BUG_ON(vcpu->kvm == NULL);
++ kvm = vcpu->kvm;
++
++ vcpu->arch.pv.pv_unhalted = false;
++ vcpu->arch.emulate_ctxt.ops = &emulate_ops;
++ if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
++ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
++ else
++ vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
++
++ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!page) {
++ r = -ENOMEM;
++ goto fail;
++ }
++ vcpu->arch.pio_data = page_address(page);
++
++ kvm_set_tsc_khz(vcpu, max_tsc_khz);
++
++ r = kvm_mmu_create(vcpu);
++ if (r < 0)
++ goto fail_free_pio_data;
++
++ if (irqchip_in_kernel(kvm)) {
++ r = kvm_create_lapic(vcpu);
++ if (r < 0)
++ goto fail_mmu_destroy;
++ } else
++ static_key_slow_inc(&kvm_no_apic_vcpu);
++
++ vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
++ GFP_KERNEL);
++ if (!vcpu->arch.mce_banks) {
++ r = -ENOMEM;
++ goto fail_free_lapic;
++ }
++ vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
++
++ if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
++ r = -ENOMEM;
++ goto fail_free_mce_banks;
++ }
++
++ r = fx_init(vcpu);
++ if (r)
++ goto fail_free_wbinvd_dirty_mask;
++
++ vcpu->arch.ia32_tsc_adjust_msr = 0x0;
++ vcpu->arch.pv_time_enabled = false;
++
++ vcpu->arch.guest_supported_xcr0 = 0;
++ vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
++
++ kvm_async_pf_hash_reset(vcpu);
++ kvm_pmu_init(vcpu);
++
++ return 0;
++fail_free_wbinvd_dirty_mask:
++ free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
++fail_free_mce_banks:
++ kfree(vcpu->arch.mce_banks);
++fail_free_lapic:
++ kvm_free_lapic(vcpu);
++fail_mmu_destroy:
++ kvm_mmu_destroy(vcpu);
++fail_free_pio_data:
++ free_page((unsigned long)vcpu->arch.pio_data);
++fail:
++ return r;
++}
++
++void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
++{
++ int idx;
++
++ kvm_pmu_destroy(vcpu);
++ kfree(vcpu->arch.mce_banks);
++ kvm_free_lapic(vcpu);
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
++ kvm_mmu_destroy(vcpu);
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
++ free_page((unsigned long)vcpu->arch.pio_data);
++ if (!irqchip_in_kernel(vcpu->kvm))
++ static_key_slow_dec(&kvm_no_apic_vcpu);
++}
++
++void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
++{
++ kvm_x86_ops->sched_in(vcpu, cpu);
++}
++
++int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
++{
++ if (type)
++ return -EINVAL;
++
++ INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
++ INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
++ INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
++ atomic_set(&kvm->arch.noncoherent_dma_count, 0);
++
++ /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
++ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
++ /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
++ set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
++ &kvm->arch.irq_sources_bitmap);
++
++ raw_spin_lock_init(&kvm->arch.tsc_write_lock);
++ mutex_init(&kvm->arch.apic_map_lock);
++ spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
++
++ pvclock_update_vm_gtod_copy(kvm);
++
++ INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
++ INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
++
++ return 0;
++}
++
++static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
++{
++ int r;
++ r = vcpu_load(vcpu);
++ BUG_ON(r);
++ kvm_mmu_unload(vcpu);
++ vcpu_put(vcpu);
++}
++
++static void kvm_free_vcpus(struct kvm *kvm)
++{
++ unsigned int i;
++ struct kvm_vcpu *vcpu;
++
++ /*
++ * Unpin any mmu pages first.
++ */
++ kvm_for_each_vcpu(i, vcpu, kvm) {
++ kvm_clear_async_pf_completion_queue(vcpu);
++ kvm_unload_vcpu_mmu(vcpu);
++ }
++ kvm_for_each_vcpu(i, vcpu, kvm)
++ kvm_arch_vcpu_free(vcpu);
++
++ mutex_lock(&kvm->lock);
++ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
++ kvm->vcpus[i] = NULL;
++
++ atomic_set(&kvm->online_vcpus, 0);
++ mutex_unlock(&kvm->lock);
++}
++
++void kvm_arch_sync_events(struct kvm *kvm)
++{
++ cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
++ cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
++ kvm_free_all_assigned_devices(kvm);
++ kvm_free_pit(kvm);
++}
++
++void kvm_arch_destroy_vm(struct kvm *kvm)
++{
++ if (current->mm == kvm->mm) {
++ /*
++ * Free memory regions allocated on behalf of userspace,
++		 * unless the memory map has changed due to process exit
++ * or fd copying.
++ */
++ struct kvm_userspace_memory_region mem;
++ memset(&mem, 0, sizeof(mem));
++ mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
++ kvm_set_memory_region(kvm, &mem);
++
++ mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
++ kvm_set_memory_region(kvm, &mem);
++
++ mem.slot = TSS_PRIVATE_MEMSLOT;
++ kvm_set_memory_region(kvm, &mem);
++ }
++ kvm_iommu_unmap_guest(kvm);
++ kfree(kvm->arch.vpic);
++ kfree(kvm->arch.vioapic);
++ kvm_free_vcpus(kvm);
++ kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
++}
++
++void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
++ struct kvm_memory_slot *dont)
++{
++ int i;
++
++ for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
++ if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
++ kvm_kvfree(free->arch.rmap[i]);
++ free->arch.rmap[i] = NULL;
++ }
++ if (i == 0)
++ continue;
++
++ if (!dont || free->arch.lpage_info[i - 1] !=
++ dont->arch.lpage_info[i - 1]) {
++ kvm_kvfree(free->arch.lpage_info[i - 1]);
++ free->arch.lpage_info[i - 1] = NULL;
++ }
++ }
++}
++
++int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
++ unsigned long npages)
++{
++ int i;
++
++ for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
++ unsigned long ugfn;
++ int lpages;
++ int level = i + 1;
++
++ lpages = gfn_to_index(slot->base_gfn + npages - 1,
++ slot->base_gfn, level) + 1;
++
++ slot->arch.rmap[i] =
++ kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
++ if (!slot->arch.rmap[i])
++ goto out_free;
++ if (i == 0)
++ continue;
++
++ slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
++ sizeof(*slot->arch.lpage_info[i - 1]));
++ if (!slot->arch.lpage_info[i - 1])
++ goto out_free;
++
++ if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
++ slot->arch.lpage_info[i - 1][0].write_count = 1;
++ if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
++ slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
++ ugfn = slot->userspace_addr >> PAGE_SHIFT;
++ /*
++ * If the gfn and userspace address are not aligned wrt each
++ * other, or if explicitly asked to, disable large page
++ * support for this slot
++ */
++ if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
++ !kvm_largepages_enabled()) {
++ unsigned long j;
++
++ for (j = 0; j < lpages; ++j)
++ slot->arch.lpage_info[i - 1][j].write_count = 1;
++ }
++ }
++
++ return 0;
++
++out_free:
++ for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
++ kvm_kvfree(slot->arch.rmap[i]);
++ slot->arch.rmap[i] = NULL;
++ if (i == 0)
++ continue;
++
++ kvm_kvfree(slot->arch.lpage_info[i - 1]);
++ slot->arch.lpage_info[i - 1] = NULL;
++ }
++ return -ENOMEM;
++}
++
++void kvm_arch_memslots_updated(struct kvm *kvm)
++{
++ /*
++ * memslots->generation has been incremented.
++ * mmio generation may have reached its maximum value.
++ */
++ kvm_mmu_invalidate_mmio_sptes(kvm);
++}
++
++int kvm_arch_prepare_memory_region(struct kvm *kvm,
++ struct kvm_memory_slot *memslot,
++ struct kvm_userspace_memory_region *mem,
++ enum kvm_mr_change change)
++{
++ /*
++ * Only private memory slots need to be mapped here since
++ * KVM_SET_MEMORY_REGION ioctl is no longer supported.
++ */
++ if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
++ unsigned long userspace_addr;
++
++ /*
++ * MAP_SHARED to prevent internal slot pages from being moved
++ * by fork()/COW.
++ */
++ userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED | MAP_ANONYMOUS, 0);
++
++ if (IS_ERR((void *)userspace_addr))
++ return PTR_ERR((void *)userspace_addr);
++
++ memslot->userspace_addr = userspace_addr;
++ }
++
++ return 0;
++}
++
++void kvm_arch_commit_memory_region(struct kvm *kvm,
++ struct kvm_userspace_memory_region *mem,
++ const struct kvm_memory_slot *old,
++ enum kvm_mr_change change)
++{
++ int nr_mmu_pages = 0;
++
++ if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
++ int ret;
++
++ ret = vm_munmap(old->userspace_addr,
++ old->npages * PAGE_SIZE);
++ if (ret < 0)
++ printk(KERN_WARNING
++ "kvm_vm_ioctl_set_memory_region: "
++ "failed to munmap memory\n");
++ }
++
++ if (!kvm->arch.n_requested_mmu_pages)
++ nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
++
++ if (nr_mmu_pages)
++ kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
++ /*
++ * Write protect all pages for dirty logging.
++ *
++	 * All the sptes which point to this slot, including the large
++	 * sptes, are set to read-only. We cannot create any new large
++	 * sptes on this slot until the end of the logging.
++ *
++ * See the comments in fast_page_fault().
++ */
++ if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
++ kvm_mmu_slot_remove_write_access(kvm, mem->slot);
++}
++
++void kvm_arch_flush_shadow_all(struct kvm *kvm)
++{
++ kvm_mmu_invalidate_zap_all_pages(kvm);
++}
++
++void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
++ struct kvm_memory_slot *slot)
++{
++ kvm_mmu_invalidate_zap_all_pages(kvm);
++}
++
++int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
++{
++ if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
++ kvm_x86_ops->check_nested_events(vcpu, false);
++
++ return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
++ !vcpu->arch.apf.halted)
++ || !list_empty_careful(&vcpu->async_pf.done)
++ || kvm_apic_has_events(vcpu)
++ || vcpu->arch.pv.pv_unhalted
++ || atomic_read(&vcpu->arch.nmi_queued) ||
++ (kvm_arch_interrupt_allowed(vcpu) &&
++ kvm_cpu_has_interrupt(vcpu));
++}
++
++int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
++{
++ return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
++}
++
++int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
++{
++ return kvm_x86_ops->interrupt_allowed(vcpu);
++}
++
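++/* A linear rip is the raw rip plus the CS segment base (cf. singlestep_rip). */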
++bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
++{
++ unsigned long current_rip = kvm_rip_read(vcpu) +
++ get_segment_base(vcpu, VCPU_SREG_CS);
++
++ return current_rip == linear_rip;
++}
++EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
++
++unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
++{
++ unsigned long rflags;
++
++ rflags = kvm_x86_ops->get_rflags(vcpu);
++ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
++ rflags &= ~X86_EFLAGS_TF;
++ return rflags;
++}
++EXPORT_SYMBOL_GPL(kvm_get_rflags);
++
++static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
++{
++ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
++ kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
++ rflags |= X86_EFLAGS_TF;
++ kvm_x86_ops->set_rflags(vcpu, rflags);
++}
++
++void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
++{
++ __kvm_set_rflags(vcpu, rflags);
++ kvm_make_request(KVM_REQ_EVENT, vcpu);
++}
++EXPORT_SYMBOL_GPL(kvm_set_rflags);
++
++void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
++{
++ int r;
++
++ if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
++ work->wakeup_all)
++ return;
++
++ r = kvm_mmu_reload(vcpu);
++ if (unlikely(r))
++ return;
++
++ if (!vcpu->arch.mmu.direct_map &&
++ work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
++ return;
++
++ vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
++}
++
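++/*
++ * The async page-fault gfn table is an open-addressed hash with linear
++ * probing; empty slots hold ~0 (see kvm_async_pf_hash_reset()).
++ */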
++static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
++{
++ return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
++}
++
++static inline u32 kvm_async_pf_next_probe(u32 key)
++{
++ return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
++}
++
++static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
++{
++ u32 key = kvm_async_pf_hash_fn(gfn);
++
++ while (vcpu->arch.apf.gfns[key] != ~0)
++ key = kvm_async_pf_next_probe(key);
++
++ vcpu->arch.apf.gfns[key] = gfn;
++}
++
++static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
++{
++ int i;
++ u32 key = kvm_async_pf_hash_fn(gfn);
++
++ for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
++ (vcpu->arch.apf.gfns[key] != gfn &&
++ vcpu->arch.apf.gfns[key] != ~0); i++)
++ key = kvm_async_pf_next_probe(key);
++
++ return key;
++}
++
++bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
++{
++ return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
++}
++
++static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
++{
++ u32 i, j, k;
++
++ i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
++ while (true) {
++ vcpu->arch.apf.gfns[i] = ~0;
++ do {
++ j = kvm_async_pf_next_probe(j);
++ if (vcpu->arch.apf.gfns[j] == ~0)
++ return;
++ k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
++ /*
++ * k lies cyclically in ]i,j]
++ * | i.k.j |
++ * |....j i.k.| or |.k..j i...|
++ */
++ } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
++ vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
++ i = j;
++ }
++}
++
++static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
++{
++ return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
++ sizeof(val));
++}
++
++void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
++ struct kvm_async_pf *work)
++{
++ struct x86_exception fault;
++
++ trace_kvm_async_pf_not_present(work->arch.token, work->gva);
++ kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
++
++ if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
++ (vcpu->arch.apf.send_user_only &&
++ kvm_x86_ops->get_cpl(vcpu) == 0))
++ kvm_make_request(KVM_REQ_APF_HALT, vcpu);
++ else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
++ fault.vector = PF_VECTOR;
++ fault.error_code_valid = true;
++ fault.error_code = 0;
++ fault.nested_page_fault = false;
++ fault.address = work->arch.token;
++ kvm_inject_page_fault(vcpu, &fault);
++ }
++}
++
++void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
++ struct kvm_async_pf *work)
++{
++ struct x86_exception fault;
++
++ trace_kvm_async_pf_ready(work->arch.token, work->gva);
++ if (work->wakeup_all)
++ work->arch.token = ~0; /* broadcast wakeup */
++ else
++ kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
++
++ if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
++ !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
++ fault.vector = PF_VECTOR;
++ fault.error_code_valid = true;
++ fault.error_code = 0;
++ fault.nested_page_fault = false;
++ fault.address = work->arch.token;
++ kvm_inject_page_fault(vcpu, &fault);
++ }
++ vcpu->arch.apf.halted = false;
++ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
++}
++
++bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
++{
++ if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
++ return true;
++ else
++ return !kvm_event_needs_reinjection(vcpu) &&
++ kvm_x86_ops->interrupt_allowed(vcpu);
++}
++
++void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
++{
++ atomic_inc(&kvm->arch.noncoherent_dma_count);
++}
++EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
++
++void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
++{
++ atomic_dec(&kvm->arch.noncoherent_dma_count);
++}
++EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
++
++bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
++{
++ return atomic_read(&kvm->arch.noncoherent_dma_count);
++}
++EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
++
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
++EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window);
+diff -Nur linux-3.18.9.orig/arch/x86/mm/fault.c linux-3.18.9/arch/x86/mm/fault.c
+--- linux-3.18.9.orig/arch/x86/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/mm/fault.c 2015-03-15 16:03:03.696094875 -0500
@@ -1128,7 +1128,7 @@
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
@@ -2675,9 +10503,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/mm/fault.c linux-3.18.8/arch/x86/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-3.18.8.orig/arch/x86/mm/highmem_32.c linux-3.18.8/arch/x86/mm/highmem_32.c
---- linux-3.18.8.orig/arch/x86/mm/highmem_32.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/mm/highmem_32.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/mm/highmem_32.c linux-3.18.9/arch/x86/mm/highmem_32.c
+--- linux-3.18.9.orig/arch/x86/mm/highmem_32.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/mm/highmem_32.c 2015-03-15 16:03:03.696094875 -0500
@@ -32,6 +32,7 @@
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
@@ -2708,9 +10536,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/mm/highmem_32.c linux-3.18.8/arch/x86/mm/hi
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
-diff -Nur linux-3.18.8.orig/arch/x86/mm/iomap_32.c linux-3.18.8/arch/x86/mm/iomap_32.c
---- linux-3.18.8.orig/arch/x86/mm/iomap_32.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/mm/iomap_32.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/mm/iomap_32.c linux-3.18.9/arch/x86/mm/iomap_32.c
+--- linux-3.18.9.orig/arch/x86/mm/iomap_32.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/mm/iomap_32.c 2015-03-15 16:03:03.696094875 -0500
@@ -56,6 +56,7 @@
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
@@ -2743,9 +10571,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/mm/iomap_32.c linux-3.18.8/arch/x86/mm/ioma
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
}
-diff -Nur linux-3.18.8.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.8/arch/x86/platform/uv/tlb_uv.c
---- linux-3.18.8.orig/arch/x86/platform/uv/tlb_uv.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/platform/uv/tlb_uv.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.9/arch/x86/platform/uv/tlb_uv.c
+--- linux-3.18.9.orig/arch/x86/platform/uv/tlb_uv.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/platform/uv/tlb_uv.c 2015-03-15 16:03:03.696094875 -0500
@@ -714,9 +714,9 @@
quiesce_local_uvhub(hmaster);
@@ -2832,9 +10660,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.8/arch/x86/
}
}
-diff -Nur linux-3.18.8.orig/arch/x86/platform/uv/uv_time.c linux-3.18.8/arch/x86/platform/uv/uv_time.c
---- linux-3.18.8.orig/arch/x86/platform/uv/uv_time.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/x86/platform/uv/uv_time.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/x86/platform/uv/uv_time.c linux-3.18.9/arch/x86/platform/uv/uv_time.c
+--- linux-3.18.9.orig/arch/x86/platform/uv/uv_time.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/x86/platform/uv/uv_time.c 2015-03-15 16:03:03.696094875 -0500
@@ -58,7 +58,7 @@
/* There is one of these allocated per node */
@@ -2915,9 +10743,9 @@ diff -Nur linux-3.18.8.orig/arch/x86/platform/uv/uv_time.c linux-3.18.8/arch/x86
}
/*
-diff -Nur linux-3.18.8.orig/arch/xtensa/mm/fault.c linux-3.18.8/arch/xtensa/mm/fault.c
---- linux-3.18.8.orig/arch/xtensa/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/arch/xtensa/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/arch/xtensa/mm/fault.c linux-3.18.9/arch/xtensa/mm/fault.c
+--- linux-3.18.9.orig/arch/xtensa/mm/fault.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/arch/xtensa/mm/fault.c 2015-03-15 16:03:03.696094875 -0500
@@ -57,7 +57,7 @@
/* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -2927,9 +10755,9 @@ diff -Nur linux-3.18.8.orig/arch/xtensa/mm/fault.c linux-3.18.8/arch/xtensa/mm/f
bad_page_fault(regs, address, SIGSEGV);
return;
}
-diff -Nur linux-3.18.8.orig/block/blk-core.c linux-3.18.8/block/blk-core.c
---- linux-3.18.8.orig/block/blk-core.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-core.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-core.c linux-3.18.9/block/blk-core.c
+--- linux-3.18.9.orig/block/blk-core.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-core.c 2015-03-15 16:03:03.696094875 -0500
@@ -100,6 +100,9 @@
INIT_LIST_HEAD(&rq->queuelist);
@@ -3005,9 +10833,9 @@ diff -Nur linux-3.18.8.orig/block/blk-core.c linux-3.18.8/block/blk-core.c
}
void blk_finish_plug(struct blk_plug *plug)
-diff -Nur linux-3.18.8.orig/block/blk-ioc.c linux-3.18.8/block/blk-ioc.c
---- linux-3.18.8.orig/block/blk-ioc.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-ioc.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-ioc.c linux-3.18.9/block/blk-ioc.c
+--- linux-3.18.9.orig/block/blk-ioc.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-ioc.c 2015-03-15 16:03:03.696094875 -0500
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -3034,9 +10862,9 @@ diff -Nur linux-3.18.8.orig/block/blk-ioc.c linux-3.18.8/block/blk-ioc.c
goto retry;
}
}
-diff -Nur linux-3.18.8.orig/block/blk-iopoll.c linux-3.18.8/block/blk-iopoll.c
---- linux-3.18.8.orig/block/blk-iopoll.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-iopoll.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-iopoll.c linux-3.18.9/block/blk-iopoll.c
+--- linux-3.18.9.orig/block/blk-iopoll.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-iopoll.c 2015-03-15 16:03:03.696094875 -0500
@@ -35,6 +35,7 @@
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
@@ -3061,9 +10889,9 @@ diff -Nur linux-3.18.8.orig/block/blk-iopoll.c linux-3.18.8/block/blk-iopoll.c
}
return NOTIFY_OK;
-diff -Nur linux-3.18.8.orig/block/blk-mq.c linux-3.18.8/block/blk-mq.c
---- linux-3.18.8.orig/block/blk-mq.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-mq.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-mq.c linux-3.18.9/block/blk-mq.c
+--- linux-3.18.9.orig/block/blk-mq.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-mq.c 2015-03-15 16:03:03.696094875 -0500
@@ -85,7 +85,7 @@
if (percpu_ref_tryget_live(&q->mq_usage_counter))
return 0;
@@ -3208,9 +11036,9 @@ diff -Nur linux-3.18.8.orig/block/blk-mq.c linux-3.18.8/block/blk-mq.c
return blk_mq_hctx_cpu_offline(hctx, cpu);
else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
return blk_mq_hctx_cpu_online(hctx, cpu);
-diff -Nur linux-3.18.8.orig/block/blk-mq-cpu.c linux-3.18.8/block/blk-mq-cpu.c
---- linux-3.18.8.orig/block/blk-mq-cpu.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-mq-cpu.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-mq-cpu.c linux-3.18.9/block/blk-mq-cpu.c
+--- linux-3.18.9.orig/block/blk-mq-cpu.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-mq-cpu.c 2015-03-15 16:03:03.696094875 -0500
@@ -16,7 +16,7 @@
#include "blk-mq.h"
@@ -3262,9 +11090,9 @@ diff -Nur linux-3.18.8.orig/block/blk-mq-cpu.c linux-3.18.8/block/blk-mq-cpu.c
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
-diff -Nur linux-3.18.8.orig/block/blk-mq.h linux-3.18.8/block/blk-mq.h
---- linux-3.18.8.orig/block/blk-mq.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-mq.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-mq.h linux-3.18.9/block/blk-mq.h
+--- linux-3.18.9.orig/block/blk-mq.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-mq.h 2015-03-15 16:03:03.700094875 -0500
@@ -73,7 +73,10 @@
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
@@ -3292,9 +11120,9 @@ diff -Nur linux-3.18.8.orig/block/blk-mq.h linux-3.18.8/block/blk-mq.h
}
struct blk_mq_alloc_data {
-diff -Nur linux-3.18.8.orig/block/blk-softirq.c linux-3.18.8/block/blk-softirq.c
---- linux-3.18.8.orig/block/blk-softirq.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/blk-softirq.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/blk-softirq.c linux-3.18.9/block/blk-softirq.c
+--- linux-3.18.9.orig/block/blk-softirq.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/blk-softirq.c 2015-03-15 16:03:03.700094875 -0500
@@ -51,6 +51,7 @@
raise_softirq_irqoff(BLOCK_SOFTIRQ);
@@ -3319,9 +11147,9 @@ diff -Nur linux-3.18.8.orig/block/blk-softirq.c linux-3.18.8/block/blk-softirq.c
}
/**
-diff -Nur linux-3.18.8.orig/block/bounce.c linux-3.18.8/block/bounce.c
---- linux-3.18.8.orig/block/bounce.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/block/bounce.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/block/bounce.c linux-3.18.9/block/bounce.c
+--- linux-3.18.9.orig/block/bounce.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/block/bounce.c 2015-03-15 16:03:03.700094875 -0500
@@ -54,11 +54,11 @@
unsigned long flags;
unsigned char *vto;
@@ -3336,9 +11164,9 @@ diff -Nur linux-3.18.8.orig/block/bounce.c linux-3.18.8/block/bounce.c
}
#else /* CONFIG_HIGHMEM */
-diff -Nur linux-3.18.8.orig/crypto/algapi.c linux-3.18.8/crypto/algapi.c
---- linux-3.18.8.orig/crypto/algapi.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/crypto/algapi.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/crypto/algapi.c linux-3.18.9/crypto/algapi.c
+--- linux-3.18.9.orig/crypto/algapi.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/crypto/algapi.c 2015-03-15 16:03:03.700094875 -0500
@@ -698,13 +698,13 @@
int crypto_register_notifier(struct notifier_block *nb)
@@ -3355,9 +11183,9 @@ diff -Nur linux-3.18.8.orig/crypto/algapi.c linux-3.18.8/crypto/algapi.c
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-diff -Nur linux-3.18.8.orig/crypto/api.c linux-3.18.8/crypto/api.c
---- linux-3.18.8.orig/crypto/api.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/crypto/api.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/crypto/api.c linux-3.18.9/crypto/api.c
+--- linux-3.18.9.orig/crypto/api.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/crypto/api.c 2015-03-15 16:03:03.700094875 -0500
@@ -31,7 +31,7 @@
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
@@ -3380,9 +11208,9 @@ diff -Nur linux-3.18.8.orig/crypto/api.c linux-3.18.8/crypto/api.c
}
return ok;
-diff -Nur linux-3.18.8.orig/crypto/internal.h linux-3.18.8/crypto/internal.h
---- linux-3.18.8.orig/crypto/internal.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/crypto/internal.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/crypto/internal.h linux-3.18.9/crypto/internal.h
+--- linux-3.18.9.orig/crypto/internal.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/crypto/internal.h 2015-03-15 16:03:03.700094875 -0500
@@ -48,7 +48,7 @@
extern struct list_head crypto_alg_list;
@@ -3401,9 +11229,9 @@ diff -Nur linux-3.18.8.orig/crypto/internal.h linux-3.18.8/crypto/internal.h
}
#endif /* _CRYPTO_INTERNAL_H */
-diff -Nur linux-3.18.8.orig/Documentation/hwlat_detector.txt linux-3.18.8/Documentation/hwlat_detector.txt
---- linux-3.18.8.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/Documentation/hwlat_detector.txt 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/Documentation/hwlat_detector.txt linux-3.18.9/Documentation/hwlat_detector.txt
+--- linux-3.18.9.orig/Documentation/hwlat_detector.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/Documentation/hwlat_detector.txt 2015-03-15 16:03:03.704094875 -0500
@@ -0,0 +1,64 @@
+Introduction:
+-------------
@@ -3469,9 +11297,9 @@ diff -Nur linux-3.18.8.orig/Documentation/hwlat_detector.txt linux-3.18.8/Docume
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
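For context, a minimal user-space reader for that "sample" interface might look
like the sketch below. This is illustrative only: it assumes debugfs is mounted
at /sys/kernel/debug and that the module's files live under hwlat_detector/
(both are assumptions about a typical setup, not something this patch specifies).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical path: assumes debugfs at /sys/kernel/debug and the
 * hwlat_detector directory layout described in the text above. */
#define HWLAT_DIR "/sys/kernel/debug/hwlat_detector"

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	char buf[256];
	ssize_t n;

	/* Enable the detector; it samples with the current threshold
	 * (initially 100 usecs, per the text above). */
	if (write_str(HWLAT_DIR "/enable", "1"))
		return 1;

	/* "sample" behaves like a pipe: reads block until the ring
	 * buffer holds a sample that exceeded the threshold. */
	int fd = open(HWLAT_DIR "/sample", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}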
-diff -Nur linux-3.18.8.orig/Documentation/sysrq.txt linux-3.18.8/Documentation/sysrq.txt
---- linux-3.18.8.orig/Documentation/sysrq.txt 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/Documentation/sysrq.txt 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/Documentation/sysrq.txt linux-3.18.9/Documentation/sysrq.txt
+--- linux-3.18.9.orig/Documentation/sysrq.txt 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/Documentation/sysrq.txt 2015-03-15 16:03:03.704094875 -0500
@@ -59,10 +59,17 @@
On other - If you know of the key combos for other architectures, please
let me know so I can add them to this section.
@@ -3492,9 +11320,9 @@ diff -Nur linux-3.18.8.orig/Documentation/sysrq.txt linux-3.18.8/Documentation/s
* What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'b' - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-3.18.8.orig/Documentation/trace/histograms.txt linux-3.18.8/Documentation/trace/histograms.txt
---- linux-3.18.8.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/Documentation/trace/histograms.txt 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/Documentation/trace/histograms.txt linux-3.18.9/Documentation/trace/histograms.txt
+--- linux-3.18.9.orig/Documentation/trace/histograms.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/Documentation/trace/histograms.txt 2015-03-15 16:03:03.704094875 -0500
@@ -0,0 +1,186 @@
+ Using the Linux Kernel Latency Histograms
+
@@ -3682,9 +11510,9 @@ diff -Nur linux-3.18.8.orig/Documentation/trace/histograms.txt linux-3.18.8/Docu
+is provided.
+
+These data are also reset when the wakeup histogram is reset.
-diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/acglobal.h linux-3.18.8/drivers/acpi/acpica/acglobal.h
---- linux-3.18.8.orig/drivers/acpi/acpica/acglobal.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/acpi/acpica/acglobal.h 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/acpi/acpica/acglobal.h linux-3.18.9/drivers/acpi/acpica/acglobal.h
+--- linux-3.18.9.orig/drivers/acpi/acpica/acglobal.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/acpi/acpica/acglobal.h 2015-03-15 16:03:03.704094875 -0500
@@ -112,7 +112,7 @@
* interrupt level
*/
@@ -3694,9 +11522,9 @@ diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/acglobal.h linux-3.18.8/drivers/
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/hwregs.c linux-3.18.8/drivers/acpi/acpica/hwregs.c
---- linux-3.18.8.orig/drivers/acpi/acpica/hwregs.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/acpi/acpica/hwregs.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/acpi/acpica/hwregs.c linux-3.18.9/drivers/acpi/acpica/hwregs.c
+--- linux-3.18.9.orig/drivers/acpi/acpica/hwregs.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/acpi/acpica/hwregs.c 2015-03-15 16:03:03.704094875 -0500
@@ -269,14 +269,14 @@
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
@@ -3714,9 +11542,9 @@ diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/hwregs.c linux-3.18.8/drivers/ac
if (ACPI_FAILURE(status)) {
goto exit;
-diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/hwxface.c linux-3.18.8/drivers/acpi/acpica/hwxface.c
---- linux-3.18.8.orig/drivers/acpi/acpica/hwxface.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/acpi/acpica/hwxface.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/acpi/acpica/hwxface.c linux-3.18.9/drivers/acpi/acpica/hwxface.c
+--- linux-3.18.9.orig/drivers/acpi/acpica/hwxface.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/acpi/acpica/hwxface.c 2015-03-15 16:03:03.704094875 -0500
@@ -374,7 +374,7 @@
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -3735,9 +11563,9 @@ diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/hwxface.c linux-3.18.8/drivers/a
return_ACPI_STATUS(status);
}
-diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/utmutex.c linux-3.18.8/drivers/acpi/acpica/utmutex.c
---- linux-3.18.8.orig/drivers/acpi/acpica/utmutex.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/acpi/acpica/utmutex.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/acpi/acpica/utmutex.c linux-3.18.9/drivers/acpi/acpica/utmutex.c
+--- linux-3.18.9.orig/drivers/acpi/acpica/utmutex.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/acpi/acpica/utmutex.c 2015-03-15 16:03:03.704094875 -0500
@@ -88,7 +88,7 @@
return_ACPI_STATUS (status);
}
@@ -3756,9 +11584,9 @@ diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/utmutex.c linux-3.18.8/drivers/a
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
-diff -Nur linux-3.18.8.orig/drivers/ata/libata-sff.c linux-3.18.8/drivers/ata/libata-sff.c
---- linux-3.18.8.orig/drivers/ata/libata-sff.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ata/libata-sff.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ata/libata-sff.c linux-3.18.9/drivers/ata/libata-sff.c
+--- linux-3.18.9.orig/drivers/ata/libata-sff.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ata/libata-sff.c 2015-03-15 16:03:03.704094875 -0500
@@ -678,9 +678,9 @@
unsigned long flags;
unsigned int consumed;
@@ -3807,9 +11635,9 @@ diff -Nur linux-3.18.8.orig/drivers/ata/libata-sff.c linux-3.18.8/drivers/ata/li
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
-diff -Nur linux-3.18.8.orig/drivers/char/random.c linux-3.18.8/drivers/char/random.c
---- linux-3.18.8.orig/drivers/char/random.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/char/random.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/char/random.c linux-3.18.9/drivers/char/random.c
+--- linux-3.18.9.orig/drivers/char/random.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/char/random.c 2015-03-15 16:03:03.708094875 -0500
@@ -776,8 +776,6 @@
} sample;
long delta, delta2, delta3;
@@ -3861,9 +11689,9 @@ diff -Nur linux-3.18.8.orig/drivers/char/random.c linux-3.18.8/drivers/char/rand
fast_mix(fast_pool);
add_interrupt_bench(cycles);
-diff -Nur linux-3.18.8.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.8/drivers/clocksource/tcb_clksrc.c
---- linux-3.18.8.orig/drivers/clocksource/tcb_clksrc.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/clocksource/tcb_clksrc.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.9/drivers/clocksource/tcb_clksrc.c
+--- linux-3.18.9.orig/drivers/clocksource/tcb_clksrc.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/clocksource/tcb_clksrc.c 2015-03-15 16:03:03.708094875 -0500
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
@@ -3978,9 +11806,9 @@ diff -Nur linux-3.18.8.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.8/driver
if (ret)
goto err_unregister_clksrc;
-diff -Nur linux-3.18.8.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.8/drivers/clocksource/timer-atmel-pit.c
---- linux-3.18.8.orig/drivers/clocksource/timer-atmel-pit.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/clocksource/timer-atmel-pit.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.9/drivers/clocksource/timer-atmel-pit.c
+--- linux-3.18.9.orig/drivers/clocksource/timer-atmel-pit.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/clocksource/timer-atmel-pit.c 2015-03-15 16:03:03.708094875 -0500
@@ -90,6 +90,7 @@
return elapsed;
}
@@ -4006,9 +11834,9 @@ diff -Nur linux-3.18.8.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.8/d
break;
case CLOCK_EVT_MODE_RESUME:
break;
-diff -Nur linux-3.18.8.orig/drivers/gpio/gpio-omap.c linux-3.18.8/drivers/gpio/gpio-omap.c
---- linux-3.18.8.orig/drivers/gpio/gpio-omap.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/gpio/gpio-omap.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/gpio/gpio-omap.c linux-3.18.9/drivers/gpio/gpio-omap.c
+--- linux-3.18.9.orig/drivers/gpio/gpio-omap.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/gpio/gpio-omap.c 2015-03-15 16:03:03.708094875 -0500
@@ -57,7 +57,7 @@
u32 saved_datain;
u32 level_mask;
@@ -4283,9 +12111,9 @@ diff -Nur linux-3.18.8.orig/drivers/gpio/gpio-omap.c linux-3.18.8/drivers/gpio/g
return 0;
}
-diff -Nur linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.8/drivers/gpu/drm/i915/i915_gem.c
---- linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/gpu/drm/i915/i915_gem.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.9/drivers/gpu/drm/i915/i915_gem.c
+--- linux-3.18.9.orig/drivers/gpu/drm/i915/i915_gem.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/gpu/drm/i915/i915_gem.c 2015-03-15 16:03:03.712094875 -0500
@@ -5144,7 +5144,7 @@
if (!mutex_is_locked(mutex))
return false;
@@ -4295,9 +12123,9 @@ diff -Nur linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.8/drivers
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
-diff -Nur linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c
---- linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.9/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+--- linux-3.18.9.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-03-15 16:03:03.712094875 -0500
@@ -1170,7 +1170,9 @@
return ret;
}
@@ -4308,9 +12136,9 @@ diff -Nur linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.1
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-diff -Nur linux-3.18.8.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.8/drivers/i2c/busses/i2c-omap.c
---- linux-3.18.8.orig/drivers/i2c/busses/i2c-omap.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/i2c/busses/i2c-omap.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.9/drivers/i2c/busses/i2c-omap.c
+--- linux-3.18.9.orig/drivers/i2c/busses/i2c-omap.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/i2c/busses/i2c-omap.c 2015-03-15 16:03:03.712094875 -0500
@@ -875,15 +875,12 @@
u16 mask;
u16 stat;
@@ -4328,9 +12156,9 @@ diff -Nur linux-3.18.8.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.8/drivers/i
return ret;
}
-diff -Nur linux-3.18.8.orig/drivers/ide/alim15x3.c linux-3.18.8/drivers/ide/alim15x3.c
---- linux-3.18.8.orig/drivers/ide/alim15x3.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/alim15x3.c 2015-03-03 08:05:17.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/alim15x3.c linux-3.18.9/drivers/ide/alim15x3.c
+--- linux-3.18.9.orig/drivers/ide/alim15x3.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/alim15x3.c 2015-03-15 16:03:03.712094875 -0500
@@ -234,7 +234,7 @@
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
@@ -4349,9 +12177,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/alim15x3.c linux-3.18.8/drivers/ide/alim
return 0;
}
-diff -Nur linux-3.18.8.orig/drivers/ide/hpt366.c linux-3.18.8/drivers/ide/hpt366.c
---- linux-3.18.8.orig/drivers/ide/hpt366.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/hpt366.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/hpt366.c linux-3.18.9/drivers/ide/hpt366.c
+--- linux-3.18.9.orig/drivers/ide/hpt366.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/hpt366.c 2015-03-15 16:03:03.716094875 -0500
@@ -1241,7 +1241,7 @@
dma_old = inb(base + 2);
@@ -4370,9 +12198,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/hpt366.c linux-3.18.8/drivers/ide/hpt366
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
-diff -Nur linux-3.18.8.orig/drivers/ide/ide-io.c linux-3.18.8/drivers/ide/ide-io.c
---- linux-3.18.8.orig/drivers/ide/ide-io.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/ide-io.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/ide-io.c linux-3.18.9/drivers/ide/ide-io.c
+--- linux-3.18.9.orig/drivers/ide/ide-io.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/ide-io.c 2015-03-15 16:03:03.716094875 -0500
@@ -659,7 +659,7 @@
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
@@ -4382,9 +12210,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/ide-io.c linux-3.18.8/drivers/ide/ide-io
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
-diff -Nur linux-3.18.8.orig/drivers/ide/ide-iops.c linux-3.18.8/drivers/ide/ide-iops.c
---- linux-3.18.8.orig/drivers/ide/ide-iops.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/ide-iops.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/ide-iops.c linux-3.18.9/drivers/ide/ide-iops.c
+--- linux-3.18.9.orig/drivers/ide/ide-iops.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/ide-iops.c 2015-03-15 16:03:03.716094875 -0500
@@ -129,12 +129,12 @@
if ((stat & ATA_BUSY) == 0)
break;
@@ -4400,9 +12228,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/ide-iops.c linux-3.18.8/drivers/ide/ide-
}
/*
* Allow status to settle, then read it again.
-diff -Nur linux-3.18.8.orig/drivers/ide/ide-io-std.c linux-3.18.8/drivers/ide/ide-io-std.c
---- linux-3.18.8.orig/drivers/ide/ide-io-std.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/ide-io-std.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/ide-io-std.c linux-3.18.9/drivers/ide/ide-io-std.c
+--- linux-3.18.9.orig/drivers/ide/ide-io-std.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/ide-io-std.c 2015-03-15 16:03:03.716094875 -0500
@@ -175,7 +175,7 @@
unsigned long uninitialized_var(flags);
@@ -4439,9 +12267,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/ide-io-std.c linux-3.18.8/drivers/ide/id
if (((len + 1) & 3) < 2)
return;
-diff -Nur linux-3.18.8.orig/drivers/ide/ide-probe.c linux-3.18.8/drivers/ide/ide-probe.c
---- linux-3.18.8.orig/drivers/ide/ide-probe.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/ide-probe.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/ide-probe.c linux-3.18.9/drivers/ide/ide-probe.c
+--- linux-3.18.9.orig/drivers/ide/ide-probe.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/ide-probe.c 2015-03-15 16:03:03.716094875 -0500
@@ -196,10 +196,10 @@
int bswap = 1;
@@ -4455,9 +12283,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/ide-probe.c linux-3.18.8/drivers/ide/ide
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
-diff -Nur linux-3.18.8.orig/drivers/ide/ide-taskfile.c linux-3.18.8/drivers/ide/ide-taskfile.c
---- linux-3.18.8.orig/drivers/ide/ide-taskfile.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/ide/ide-taskfile.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/ide/ide-taskfile.c linux-3.18.9/drivers/ide/ide-taskfile.c
+--- linux-3.18.9.orig/drivers/ide/ide-taskfile.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/ide/ide-taskfile.c 2015-03-15 16:03:03.716094875 -0500
@@ -250,7 +250,7 @@
page_is_high = PageHighMem(page);
@@ -4485,9 +12313,9 @@ diff -Nur linux-3.18.8.orig/drivers/ide/ide-taskfile.c linux-3.18.8/drivers/ide/
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-diff -Nur linux-3.18.8.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.8/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
---- linux-3.18.8.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.9/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+--- linux-3.18.9.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-03-15 16:03:03.716094875 -0500
@@ -796,7 +796,7 @@
ipoib_mcast_stop_thread(dev, 0);
@@ -4506,9 +12334,9 @@ diff -Nur linux-3.18.8.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux
/* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
-diff -Nur linux-3.18.8.orig/drivers/input/gameport/gameport.c linux-3.18.8/drivers/input/gameport/gameport.c
---- linux-3.18.8.orig/drivers/input/gameport/gameport.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/input/gameport/gameport.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/input/gameport/gameport.c linux-3.18.9/drivers/input/gameport/gameport.c
+--- linux-3.18.9.orig/drivers/input/gameport/gameport.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/input/gameport/gameport.c 2015-03-15 16:03:03.716094875 -0500
@@ -124,12 +124,12 @@
tx = 1 << 30;
@@ -4538,9 +12366,9 @@ diff -Nur linux-3.18.8.orig/drivers/input/gameport/gameport.c linux-3.18.8/drive
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
-diff -Nur linux-3.18.8.orig/drivers/leds/trigger/Kconfig linux-3.18.8/drivers/leds/trigger/Kconfig
---- linux-3.18.8.orig/drivers/leds/trigger/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/leds/trigger/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/leds/trigger/Kconfig linux-3.18.9/drivers/leds/trigger/Kconfig
+--- linux-3.18.9.orig/drivers/leds/trigger/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/leds/trigger/Kconfig 2015-03-15 16:03:03.716094875 -0500
@@ -61,7 +61,7 @@
config LEDS_TRIGGER_CPU
@@ -4550,9 +12378,9 @@ diff -Nur linux-3.18.8.orig/drivers/leds/trigger/Kconfig linux-3.18.8/drivers/le
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
-diff -Nur linux-3.18.8.orig/drivers/md/bcache/Kconfig linux-3.18.8/drivers/md/bcache/Kconfig
---- linux-3.18.8.orig/drivers/md/bcache/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/md/bcache/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/md/bcache/Kconfig linux-3.18.9/drivers/md/bcache/Kconfig
+--- linux-3.18.9.orig/drivers/md/bcache/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/md/bcache/Kconfig 2015-03-15 16:03:03.720094875 -0500
@@ -1,6 +1,7 @@
config BCACHE
@@ -4561,9 +12389,9 @@ diff -Nur linux-3.18.8.orig/drivers/md/bcache/Kconfig linux-3.18.8/drivers/md/bc
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
-diff -Nur linux-3.18.8.orig/drivers/md/dm.c linux-3.18.8/drivers/md/dm.c
---- linux-3.18.8.orig/drivers/md/dm.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/md/dm.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/md/dm.c linux-3.18.9/drivers/md/dm.c
+--- linux-3.18.9.orig/drivers/md/dm.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/md/dm.c 2015-03-15 16:03:03.720094875 -0500
@@ -1898,14 +1898,14 @@
if (map_request(ti, clone, md))
goto requeued;
@@ -4581,9 +12409,9 @@ diff -Nur linux-3.18.8.orig/drivers/md/dm.c linux-3.18.8/drivers/md/dm.c
spin_lock(q->queue_lock);
delay_and_out:
-diff -Nur linux-3.18.8.orig/drivers/md/raid5.c linux-3.18.8/drivers/md/raid5.c
---- linux-3.18.8.orig/drivers/md/raid5.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/md/raid5.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/md/raid5.c linux-3.18.9/drivers/md/raid5.c
+--- linux-3.18.9.orig/drivers/md/raid5.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/md/raid5.c 2015-03-15 16:03:03.724094875 -0500
@@ -1649,8 +1649,9 @@
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -4605,7 +12433,7 @@ diff -Nur linux-3.18.8.orig/drivers/md/raid5.c linux-3.18.8/drivers/md/raid5.c
}
static int grow_one_stripe(struct r5conf *conf, int hash)
-@@ -5707,6 +5709,7 @@
+@@ -5708,6 +5710,7 @@
__func__, cpu);
break;
}
@@ -4613,9 +12441,7157 @@ diff -Nur linux-3.18.8.orig/drivers/md/raid5.c linux-3.18.8/drivers/md/raid5.c
}
put_online_cpus();
-diff -Nur linux-3.18.8.orig/drivers/md/raid5.h linux-3.18.8/drivers/md/raid5.h
---- linux-3.18.8.orig/drivers/md/raid5.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/md/raid5.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/md/raid5.c.orig linux-3.18.9/drivers/md/raid5.c.orig
+--- linux-3.18.9.orig/drivers/md/raid5.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/drivers/md/raid5.c.orig 2015-03-06 16:53:42.000000000 -0600
+@@ -0,0 +1,7144 @@
++/*
++ * raid5.c : Multiple Devices driver for Linux
++ * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
++ * Copyright (C) 1999, 2000 Ingo Molnar
++ * Copyright (C) 2002, 2003 H. Peter Anvin
++ *
++ * RAID-4/5/6 management functions.
++ * Thanks to Penguin Computing for making the RAID-6 development possible
++ * by donating a test server!
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2, or (at your option)
++ * any later version.
++ *
++ * You should have received a copy of the GNU General Public License
++ * (for example /usr/src/linux/COPYING); if not, write to the Free
++ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ */
++
++/*
++ * BITMAP UNPLUGGING:
++ *
++ * The sequencing for updating the bitmap reliably is a little
++ * subtle (and I got it wrong the first time) so it deserves some
++ * explanation.
++ *
++ * We group bitmap updates into batches. Each batch has a number.
++ * We may write out several batches at once, but that isn't very important.
++ * conf->seq_write is the number of the last batch successfully written.
++ * conf->seq_flush is the number of the last batch that was closed to
++ * new additions.
++ * When we discover that we will need to write to any block in a stripe
++ * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
++ * the number of the batch it will be in. This is seq_flush+1.
++ * When we are ready to do a write, if that batch hasn't been written yet,
++ * we plug the array and queue the stripe for later.
++ * When an unplug happens, we increment bm_flush, thus closing the current
++ * batch.
++ * When we notice that bm_flush > bm_write, we write out all pending updates
++ * to the bitmap, and advance bm_write to where bm_flush was.
++ * This may occasionally write a bit out twice, but is sure never to
++ * miss any bits.
++ */
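A toy user-space rendition of this bookkeeping, with names mirroring the
comment (seq_flush, seq_write, bm_seq); a sketch of the idea only, not the
kernel implementation:

#include <stdio.h>

/* Toy model of the batch counters described above: seq_flush is the
 * last batch closed to new additions, seq_write the last batch whose
 * bitmap updates reached disk. */
static unsigned seq_flush, seq_write;

/* A stripe needing a bitmap update joins the still-open batch. */
static unsigned record_update(void)
{
	return seq_flush + 1;	/* the stripe's bm_seq */
}

/* An unplug closes the current batch. */
static void unplug(void)
{
	seq_flush++;
}

/* The writer flushes all pending updates and catches seq_write up. */
static void write_bitmap(void)
{
	if (seq_flush > seq_write) {
		printf("writing batches %u..%u\n", seq_write + 1, seq_flush);
		seq_write = seq_flush;
	}
}

/* A stripe may proceed with its write once its batch is on disk. */
static int may_write(unsigned bm_seq)
{
	return seq_write >= bm_seq;
}

int main(void)
{
	unsigned s = record_update();		/* s == 1, batch open */
	printf("may write? %d\n", may_write(s));	/* 0: queued */
	unplug();				/* close batch 1 */
	write_bitmap();				/* flush it */
	printf("may write? %d\n", may_write(s));	/* 1 */
	return 0;
}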
++
++#include <linux/blkdev.h>
++#include <linux/kthread.h>
++#include <linux/raid/pq.h>
++#include <linux/async_tx.h>
++#include <linux/module.h>
++#include <linux/async.h>
++#include <linux/seq_file.h>
++#include <linux/cpu.h>
++#include <linux/slab.h>
++#include <linux/ratelimit.h>
++#include <linux/nodemask.h>
++#include <trace/events/block.h>
++
++#include "md.h"
++#include "raid5.h"
++#include "raid0.h"
++#include "bitmap.h"
++
++#define cpu_to_group(cpu) cpu_to_node(cpu)
++#define ANY_GROUP NUMA_NO_NODE
++
++static bool devices_handle_discard_safely = false;
++module_param(devices_handle_discard_safely, bool, 0644);
++MODULE_PARM_DESC(devices_handle_discard_safely,
++ "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
++static struct workqueue_struct *raid5_wq;
++/*
++ * Stripe cache
++ */
++
++#define NR_STRIPES 256
++#define STRIPE_SIZE PAGE_SIZE
++#define STRIPE_SHIFT (PAGE_SHIFT - 9)
++#define STRIPE_SECTORS (STRIPE_SIZE>>9)
++#define IO_THRESHOLD 1
++#define BYPASS_THRESHOLD 1
++#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
++#define HASH_MASK (NR_HASH - 1)
++#define MAX_STRIPE_BATCH 8
++
++static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
++{
++ int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
++ return &conf->stripe_hashtbl[hash];
++}
++
++static inline int stripe_hash_locks_hash(sector_t sect)
++{
++ return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK;
++}
++
++static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
++{
++ spin_lock_irq(conf->hash_locks + hash);
++ spin_lock(&conf->device_lock);
++}
++
++static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
++{
++ spin_unlock(&conf->device_lock);
++ spin_unlock_irq(conf->hash_locks + hash);
++}
++
++static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
++{
++ int i;
++ local_irq_disable();
++ spin_lock(conf->hash_locks);
++ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
++ spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
++ spin_lock(&conf->device_lock);
++}
++
++static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
++{
++ int i;
++ spin_unlock(&conf->device_lock);
++ for (i = NR_STRIPE_HASH_LOCKS; i; i--)
++ spin_unlock(conf->hash_locks + i - 1);
++ local_irq_enable();
++}
++
++/* bios attached to a stripe+device for I/O are linked together in bi_sector
++ * order without overlap. There may be several bios per stripe+device, and

++ * a bio could span several devices.
++ * When walking this list for a particular stripe+device, we must never proceed
++ * beyond a bio that extends past this device, as the next bio might no longer
++ * be valid.
++ * This function is used to determine the 'next' bio in the list, given the sector
++ * of the current stripe+device
++ */
++static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
++{
++ int sectors = bio_sectors(bio);
++ if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
++ return bio->bi_next;
++ else
++ return NULL;
++}
++
++/*
++ * We maintain a biased count of active stripes in the bottom 16 bits of
++ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
++ */
++static inline int raid5_bi_processed_stripes(struct bio *bio)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ return (atomic_read(segments) >> 16) & 0xffff;
++}
++
++static inline int raid5_dec_bi_active_stripes(struct bio *bio)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ return atomic_sub_return(1, segments) & 0xffff;
++}
++
++static inline void raid5_inc_bi_active_stripes(struct bio *bio)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ atomic_inc(segments);
++}
++
++static inline void raid5_set_bi_processed_stripes(struct bio *bio,
++ unsigned int cnt)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ int old, new;
++
++ do {
++ old = atomic_read(segments);
++ new = (old & 0xffff) | (cnt << 16);
++ } while (atomic_cmpxchg(segments, old, new) != old);
++}
++
++static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
++{
++ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
++ atomic_set(segments, cnt);
++}
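The accessors above pack two 16-bit counters into one 32-bit atomic word. The
same idiom, restated as a self-contained C11 sketch (plain atomics instead of
the kernel's atomic_t; illustrative only):

#include <stdatomic.h>
#include <stdio.h>

/* Active count in the low 16 bits, processed count in the high
 * 16 bits of a single 32-bit atomic word. */
static atomic_uint segments;

static unsigned processed(void)
{
	return (atomic_load(&segments) >> 16) & 0xffff;
}

static void inc_active(void)
{
	atomic_fetch_add(&segments, 1);
}

static unsigned dec_active(void)
{
	/* fetch_sub returns the old value; mirror sub_return semantics */
	return (atomic_fetch_sub(&segments, 1) - 1) & 0xffff;
}

/* Replace the high half with cnt via compare-and-swap, the same
 * shape as raid5_set_bi_processed_stripes() above. */
static void set_processed(unsigned cnt)
{
	unsigned old = atomic_load(&segments), new;
	do {
		new = (old & 0xffff) | (cnt << 16);
	} while (!atomic_compare_exchange_weak(&segments, &old, new));
}

int main(void)
{
	inc_active();
	inc_active();
	set_processed(7);
	/* prints processed=7 active-after-dec=1 */
	printf("processed=%u active-after-dec=%u\n", processed(), dec_active());
	return 0;
}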
++
++/* Find first data disk in a raid6 stripe */
++static inline int raid6_d0(struct stripe_head *sh)
++{
++ if (sh->ddf_layout)
++ /* ddf always start from first device */
++ return 0;
++ /* md starts just after Q block */
++ if (sh->qd_idx == sh->disks - 1)
++ return 0;
++ else
++ return sh->qd_idx + 1;
++}
++static inline int raid6_next_disk(int disk, int raid_disks)
++{
++ disk++;
++ return (disk < raid_disks) ? disk : 0;
++}
++
++/* When walking through the disks in a raid5, starting at raid6_d0,
++ * we need to map each disk to a 'slot', where the data disks are slot
++ * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
++ * is raid_disks-1. This helper does that mapping.
++ */
++static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
++ int *count, int syndrome_disks)
++{
++ int slot = *count;
++
++ if (sh->ddf_layout)
++ (*count)++;
++ if (idx == sh->pd_idx)
++ return syndrome_disks;
++ if (idx == sh->qd_idx)
++ return syndrome_disks + 1;
++ if (!sh->ddf_layout)
++ (*count)++;
++ return slot;
++}
++
++static void return_io(struct bio *return_bi)
++{
++ struct bio *bi = return_bi;
++ while (bi) {
++
++ return_bi = bi->bi_next;
++ bi->bi_next = NULL;
++ bi->bi_iter.bi_size = 0;
++ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
++ bi, 0);
++ bio_endio(bi, 0);
++ bi = return_bi;
++ }
++}
++
++static void print_raid5_conf (struct r5conf *conf);
++
++static int stripe_operations_active(struct stripe_head *sh)
++{
++ return sh->check_state || sh->reconstruct_state ||
++ test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
++ test_bit(STRIPE_COMPUTE_RUN, &sh->state);
++}
++
++static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
++{
++ struct r5conf *conf = sh->raid_conf;
++ struct r5worker_group *group;
++ int thread_cnt;
++ int i, cpu = sh->cpu;
++
++ if (!cpu_online(cpu)) {
++ cpu = cpumask_any(cpu_online_mask);
++ sh->cpu = cpu;
++ }
++
++ if (list_empty(&sh->lru)) {
++ struct r5worker_group *group;
++ group = conf->worker_groups + cpu_to_group(cpu);
++ list_add_tail(&sh->lru, &group->handle_list);
++ group->stripes_cnt++;
++ sh->group = group;
++ }
++
++ if (conf->worker_cnt_per_group == 0) {
++ md_wakeup_thread(conf->mddev->thread);
++ return;
++ }
++
++ group = conf->worker_groups + cpu_to_group(sh->cpu);
++
++ group->workers[0].working = true;
++	/* at least one worker should run to avoid a race */
++ queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work);
++
++ thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1;
++ /* wakeup more workers */
++ for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
++ if (group->workers[i].working == false) {
++ group->workers[i].working = true;
++ queue_work_on(sh->cpu, raid5_wq,
++ &group->workers[i].work);
++ thread_cnt--;
++ }
++ }
++}
++
++static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
++ struct list_head *temp_inactive_list)
++{
++ BUG_ON(!list_empty(&sh->lru));
++ BUG_ON(atomic_read(&conf->active_stripes)==0);
++ if (test_bit(STRIPE_HANDLE, &sh->state)) {
++ if (test_bit(STRIPE_DELAYED, &sh->state) &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
++ list_add_tail(&sh->lru, &conf->delayed_list);
++ if (atomic_read(&conf->preread_active_stripes)
++ < IO_THRESHOLD)
++ md_wakeup_thread(conf->mddev->thread);
++ } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
++ sh->bm_seq - conf->seq_write > 0)
++ list_add_tail(&sh->lru, &conf->bitmap_list);
++ else {
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ clear_bit(STRIPE_BIT_DELAY, &sh->state);
++ if (conf->worker_cnt_per_group == 0) {
++ list_add_tail(&sh->lru, &conf->handle_list);
++ } else {
++ raid5_wakeup_stripe_thread(sh);
++ return;
++ }
++ }
++ md_wakeup_thread(conf->mddev->thread);
++ } else {
++ BUG_ON(stripe_operations_active(sh));
++ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ if (atomic_dec_return(&conf->preread_active_stripes)
++ < IO_THRESHOLD)
++ md_wakeup_thread(conf->mddev->thread);
++ atomic_dec(&conf->active_stripes);
++ if (!test_bit(STRIPE_EXPANDING, &sh->state))
++ list_add_tail(&sh->lru, temp_inactive_list);
++ }
++}
++
++static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
++ struct list_head *temp_inactive_list)
++{
++ if (atomic_dec_and_test(&sh->count))
++ do_release_stripe(conf, sh, temp_inactive_list);
++}
++
++/*
++ * If @hash is NR_STRIPE_HASH_LOCKS, we have a list of inactive_lists
++ *
++ * Be careful: only one task can add/delete stripes from temp_inactive_list at
++ * a given time. Adding stripes only takes the device lock, while deleting
++ * stripes only takes the hash lock.
++ */
++static void release_inactive_stripe_list(struct r5conf *conf,
++ struct list_head *temp_inactive_list,
++ int hash)
++{
++ int size;
++ bool do_wakeup = false;
++ unsigned long flags;
++
++ if (hash == NR_STRIPE_HASH_LOCKS) {
++ size = NR_STRIPE_HASH_LOCKS;
++ hash = NR_STRIPE_HASH_LOCKS - 1;
++ } else
++ size = 1;
++ while (size) {
++ struct list_head *list = &temp_inactive_list[size - 1];
++
++ /*
++		 * We don't hold any lock here yet; get_active_stripe() might
++ * remove stripes from the list
++ */
++ if (!list_empty_careful(list)) {
++ spin_lock_irqsave(conf->hash_locks + hash, flags);
++ if (list_empty(conf->inactive_list + hash) &&
++ !list_empty(list))
++ atomic_dec(&conf->empty_inactive_list_nr);
++ list_splice_tail_init(list, conf->inactive_list + hash);
++ do_wakeup = true;
++ spin_unlock_irqrestore(conf->hash_locks + hash, flags);
++ }
++ size--;
++ hash--;
++ }
++
++ if (do_wakeup) {
++ wake_up(&conf->wait_for_stripe);
++ if (conf->retry_read_aligned)
++ md_wakeup_thread(conf->mddev->thread);
++ }
++}
++
++/* should hold conf->device_lock already */
++static int release_stripe_list(struct r5conf *conf,
++ struct list_head *temp_inactive_list)
++{
++ struct stripe_head *sh;
++ int count = 0;
++ struct llist_node *head;
++
++ head = llist_del_all(&conf->released_stripes);
++ head = llist_reverse_order(head);
++ while (head) {
++ int hash;
++
++ sh = llist_entry(head, struct stripe_head, release_list);
++ head = llist_next(head);
++		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
++ smp_mb();
++ clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
++ /*
++		 * Don't worry that the bit is still set here: if the bit is set
++		 * again, the count is always > 1. The same holds for the
++		 * STRIPE_ON_UNPLUG_LIST bit.
++ */
++ hash = sh->hash_lock_index;
++ __release_stripe(conf, sh, &temp_inactive_list[hash]);
++ count++;
++ }
++
++ return count;
++}
++
++static void release_stripe(struct stripe_head *sh)
++{
++ struct r5conf *conf = sh->raid_conf;
++ unsigned long flags;
++ struct list_head list;
++ int hash;
++ bool wakeup;
++
++	/* Avoid release_list until the last reference. */
++ if (atomic_add_unless(&sh->count, -1, 1))
++ return;
++
++ if (unlikely(!conf->mddev->thread) ||
++ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
++ goto slow_path;
++ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
++ if (wakeup)
++ md_wakeup_thread(conf->mddev->thread);
++ return;
++slow_path:
++ local_irq_save(flags);
++ /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
++ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
++ INIT_LIST_HEAD(&list);
++ hash = sh->hash_lock_index;
++ do_release_stripe(conf, sh, &list);
++ spin_unlock(&conf->device_lock);
++ release_inactive_stripe_list(conf, &list, hash);
++ }
++ local_irq_restore(flags);
++}
++
++static inline void remove_hash(struct stripe_head *sh)
++{
++ pr_debug("remove_hash(), stripe %llu\n",
++ (unsigned long long)sh->sector);
++
++ hlist_del_init(&sh->hash);
++}
++
++static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
++{
++ struct hlist_head *hp = stripe_hash(conf, sh->sector);
++
++ pr_debug("insert_hash(), stripe %llu\n",
++ (unsigned long long)sh->sector);
++
++ hlist_add_head(&sh->hash, hp);
++}
++
++/* find an idle stripe, make sure it is unhashed, and return it. */
++static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
++{
++ struct stripe_head *sh = NULL;
++ struct list_head *first;
++
++ if (list_empty(conf->inactive_list + hash))
++ goto out;
++ first = (conf->inactive_list + hash)->next;
++ sh = list_entry(first, struct stripe_head, lru);
++ list_del_init(first);
++ remove_hash(sh);
++ atomic_inc(&conf->active_stripes);
++ BUG_ON(hash != sh->hash_lock_index);
++ if (list_empty(conf->inactive_list + hash))
++ atomic_inc(&conf->empty_inactive_list_nr);
++out:
++ return sh;
++}
++
++static void shrink_buffers(struct stripe_head *sh)
++{
++ struct page *p;
++ int i;
++ int num = sh->raid_conf->pool_size;
++
++ for (i = 0; i < num ; i++) {
++ WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
++ p = sh->dev[i].page;
++ if (!p)
++ continue;
++ sh->dev[i].page = NULL;
++ put_page(p);
++ }
++}
++
++static int grow_buffers(struct stripe_head *sh)
++{
++ int i;
++ int num = sh->raid_conf->pool_size;
++
++ for (i = 0; i < num; i++) {
++ struct page *page;
++
++ if (!(page = alloc_page(GFP_KERNEL))) {
++ return 1;
++ }
++ sh->dev[i].page = page;
++ sh->dev[i].orig_page = page;
++ }
++ return 0;
++}
++
++static void raid5_build_block(struct stripe_head *sh, int i, int previous);
++static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
++ struct stripe_head *sh);
++
++static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int i, seq;
++
++ BUG_ON(atomic_read(&sh->count) != 0);
++ BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
++ BUG_ON(stripe_operations_active(sh));
++
++ pr_debug("init_stripe called, stripe %llu\n",
++ (unsigned long long)sector);
++retry:
++ seq = read_seqcount_begin(&conf->gen_lock);
++ sh->generation = conf->generation - previous;
++ sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
++ sh->sector = sector;
++ stripe_set_idx(sector, conf, previous, sh);
++ sh->state = 0;
++
++ for (i = sh->disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ if (dev->toread || dev->read || dev->towrite || dev->written ||
++ test_bit(R5_LOCKED, &dev->flags)) {
++ printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
++ (unsigned long long)sh->sector, i, dev->toread,
++ dev->read, dev->towrite, dev->written,
++ test_bit(R5_LOCKED, &dev->flags));
++ WARN_ON(1);
++ }
++ dev->flags = 0;
++ raid5_build_block(sh, i, previous);
++ }
++ if (read_seqcount_retry(&conf->gen_lock, seq))
++ goto retry;
++ insert_hash(conf, sh);
++ sh->cpu = smp_processor_id();
++}
++
++static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
++ short generation)
++{
++ struct stripe_head *sh;
++
++ pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
++ hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
++ if (sh->sector == sector && sh->generation == generation)
++ return sh;
++ pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
++ return NULL;
++}
++
++/*
++ * Need to check if array has failed when deciding whether to:
++ * - start an array
++ * - remove non-faulty devices
++ * - add a spare
++ * - allow a reshape
++ * This determination is simple when no reshape is happening.
++ * However if there is a reshape, we need to carefully check
++ * both the before and after sections.
++ * This is because some failed devices may only affect one
++ * of the two sections, and some non-in_sync devices may
++ * be insync in the section most affected by failed devices.
++ */
++static int calc_degraded(struct r5conf *conf)
++{
++ int degraded, degraded2;
++ int i;
++
++ rcu_read_lock();
++ degraded = 0;
++ for (i = 0; i < conf->previous_raid_disks; i++) {
++ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = rcu_dereference(conf->disks[i].replacement);
++ if (!rdev || test_bit(Faulty, &rdev->flags))
++ degraded++;
++ else if (test_bit(In_sync, &rdev->flags))
++ ;
++ else
++ /* not in-sync or faulty.
++ * If the reshape increases the number of devices,
++ * this is being recovered by the reshape, so
++ * this 'previous' section is not in_sync.
++ * If the number of devices is being reduced however,
++ * the device can only be part of the array if
++ * we are reverting a reshape, so this section will
++ * be in-sync.
++ */
++ if (conf->raid_disks >= conf->previous_raid_disks)
++ degraded++;
++ }
++ rcu_read_unlock();
++ if (conf->raid_disks == conf->previous_raid_disks)
++ return degraded;
++ rcu_read_lock();
++ degraded2 = 0;
++ for (i = 0; i < conf->raid_disks; i++) {
++ struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = rcu_dereference(conf->disks[i].replacement);
++ if (!rdev || test_bit(Faulty, &rdev->flags))
++ degraded2++;
++ else if (test_bit(In_sync, &rdev->flags))
++ ;
++ else
++ /* not in-sync or faulty.
++ * If reshape increases the number of devices, this
++ * section has already been recovered, else it
++ * almost certainly hasn't.
++ */
++ if (conf->raid_disks <= conf->previous_raid_disks)
++ degraded2++;
++ }
++ rcu_read_unlock();
++ if (degraded2 > degraded)
++ return degraded2;
++ return degraded;
++}
++
++static int has_failed(struct r5conf *conf)
++{
++ int degraded;
++
++ if (conf->mddev->reshape_position == MaxSector)
++ return conf->mddev->degraded > conf->max_degraded;
++
++ degraded = calc_degraded(conf);
++ if (degraded > conf->max_degraded)
++ return 1;
++ return 0;
++}
++
++static struct stripe_head *
++get_active_stripe(struct r5conf *conf, sector_t sector,
++ int previous, int noblock, int noquiesce)
++{
++ struct stripe_head *sh;
++ int hash = stripe_hash_locks_hash(sector);
++
++ pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
++
++ spin_lock_irq(conf->hash_locks + hash);
++
++ do {
++ wait_event_lock_irq(conf->wait_for_stripe,
++ conf->quiesce == 0 || noquiesce,
++ *(conf->hash_locks + hash));
++ sh = __find_stripe(conf, sector, conf->generation - previous);
++ if (!sh) {
++ if (!conf->inactive_blocked)
++ sh = get_free_stripe(conf, hash);
++ if (noblock && sh == NULL)
++ break;
++ if (!sh) {
++ conf->inactive_blocked = 1;
++ wait_event_lock_irq(
++ conf->wait_for_stripe,
++ !list_empty(conf->inactive_list + hash) &&
++ (atomic_read(&conf->active_stripes)
++ < (conf->max_nr_stripes * 3 / 4)
++ || !conf->inactive_blocked),
++ *(conf->hash_locks + hash));
++ conf->inactive_blocked = 0;
++ } else {
++ init_stripe(sh, sector, previous);
++ atomic_inc(&sh->count);
++ }
++ } else if (!atomic_inc_not_zero(&sh->count)) {
++ spin_lock(&conf->device_lock);
++ if (!atomic_read(&sh->count)) {
++ if (!test_bit(STRIPE_HANDLE, &sh->state))
++ atomic_inc(&conf->active_stripes);
++ BUG_ON(list_empty(&sh->lru) &&
++ !test_bit(STRIPE_EXPANDING, &sh->state));
++ list_del_init(&sh->lru);
++ if (sh->group) {
++ sh->group->stripes_cnt--;
++ sh->group = NULL;
++ }
++ }
++ atomic_inc(&sh->count);
++ spin_unlock(&conf->device_lock);
++ }
++ } while (sh == NULL);
++
++ spin_unlock_irq(conf->hash_locks + hash);
++ return sh;
++}
++
++/* Determine if 'data_offset' or 'new_data_offset' should be used
++ * in this stripe_head.
++ */
++static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
++{
++ sector_t progress = conf->reshape_progress;
++ /* Need a memory barrier to make sure we see the value
++ * of conf->generation, or ->data_offset that was set before
++ * reshape_progress was updated.
++ */
++ smp_rmb();
++ if (progress == MaxSector)
++ return 0;
++ if (sh->generation == conf->generation - 1)
++ return 0;
++ /* We are in a reshape, and this is a new-generation stripe,
++ * so use new_data_offset.
++ */
++ return 1;
++}
++
++static void
++raid5_end_read_request(struct bio *bi, int error);
++static void
++raid5_end_write_request(struct bio *bi, int error);
++
++static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int i, disks = sh->disks;
++
++ might_sleep();
++
++ for (i = disks; i--; ) {
++ int rw;
++ int replace_only = 0;
++ struct bio *bi, *rbi;
++ struct md_rdev *rdev, *rrdev = NULL;
++ if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
++ if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
++ rw = WRITE_FUA;
++ else
++ rw = WRITE;
++ if (test_bit(R5_Discard, &sh->dev[i].flags))
++ rw |= REQ_DISCARD;
++ } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
++ rw = READ;
++ else if (test_and_clear_bit(R5_WantReplace,
++ &sh->dev[i].flags)) {
++ rw = WRITE;
++ replace_only = 1;
++ } else
++ continue;
++ if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
++ rw |= REQ_SYNC;
++
++ bi = &sh->dev[i].req;
++ rbi = &sh->dev[i].rreq; /* For writing to replacement */
++
++ rcu_read_lock();
++ rrdev = rcu_dereference(conf->disks[i].replacement);
++ smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
++ rdev = rcu_dereference(conf->disks[i].rdev);
++ if (!rdev) {
++ rdev = rrdev;
++ rrdev = NULL;
++ }
++ if (rw & WRITE) {
++ if (replace_only)
++ rdev = NULL;
++ if (rdev == rrdev)
++ /* We raced and saw duplicates */
++ rrdev = NULL;
++ } else {
++ if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
++ rdev = rrdev;
++ rrdev = NULL;
++ }
++
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = NULL;
++ if (rdev)
++ atomic_inc(&rdev->nr_pending);
++ if (rrdev && test_bit(Faulty, &rrdev->flags))
++ rrdev = NULL;
++ if (rrdev)
++ atomic_inc(&rrdev->nr_pending);
++ rcu_read_unlock();
++
++		/* We have already checked bad blocks for reads. Now we
++		 * need to check for writes. We never accept write errors
++		 * on the replacement, so we don't need to check rrdev.
++ */
++ while ((rw & WRITE) && rdev &&
++ test_bit(WriteErrorSeen, &rdev->flags)) {
++ sector_t first_bad;
++ int bad_sectors;
++ int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
++ &first_bad, &bad_sectors);
++ if (!bad)
++ break;
++
++ if (bad < 0) {
++ set_bit(BlockedBadBlocks, &rdev->flags);
++ if (!conf->mddev->external &&
++ conf->mddev->flags) {
++ /* It is very unlikely, but we might
++ * still need to write out the
++ * bad block log - better give it
++					 * a chance */
++ md_check_recovery(conf->mddev);
++ }
++ /*
++ * Because md_wait_for_blocked_rdev
++ * will dec nr_pending, we must
++ * increment it first.
++ */
++ atomic_inc(&rdev->nr_pending);
++ md_wait_for_blocked_rdev(rdev, conf->mddev);
++ } else {
++ /* Acknowledged bad block - skip the write */
++ rdev_dec_pending(rdev, conf->mddev);
++ rdev = NULL;
++ }
++ }
++
++ if (rdev) {
++ if (s->syncing || s->expanding || s->expanded
++ || s->replacing)
++ md_sync_acct(rdev->bdev, STRIPE_SECTORS);
++
++ set_bit(STRIPE_IO_STARTED, &sh->state);
++
++ bio_reset(bi);
++ bi->bi_bdev = rdev->bdev;
++ bi->bi_rw = rw;
++ bi->bi_end_io = (rw & WRITE)
++ ? raid5_end_write_request
++ : raid5_end_read_request;
++ bi->bi_private = sh;
++
++ pr_debug("%s: for %llu schedule op %ld on disc %d\n",
++ __func__, (unsigned long long)sh->sector,
++ bi->bi_rw, i);
++ atomic_inc(&sh->count);
++ if (use_new_offset(conf, sh))
++ bi->bi_iter.bi_sector = (sh->sector
++ + rdev->new_data_offset);
++ else
++ bi->bi_iter.bi_sector = (sh->sector
++ + rdev->data_offset);
++ if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
++ bi->bi_rw |= REQ_NOMERGE;
++
++ if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
++ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ sh->dev[i].vec.bv_page = sh->dev[i].page;
++ bi->bi_vcnt = 1;
++ bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
++ bi->bi_io_vec[0].bv_offset = 0;
++ bi->bi_iter.bi_size = STRIPE_SIZE;
++ /*
++			 * If this is a discard request, set bi_vcnt to 0. We don't
++			 * want to confuse SCSI, because SCSI will replace the payload.
++ */
++ if (rw & REQ_DISCARD)
++ bi->bi_vcnt = 0;
++ if (rrdev)
++ set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
++
++ if (conf->mddev->gendisk)
++ trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
++ bi, disk_devt(conf->mddev->gendisk),
++ sh->dev[i].sector);
++ generic_make_request(bi);
++ }
++ if (rrdev) {
++ if (s->syncing || s->expanding || s->expanded
++ || s->replacing)
++ md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
++
++ set_bit(STRIPE_IO_STARTED, &sh->state);
++
++ bio_reset(rbi);
++ rbi->bi_bdev = rrdev->bdev;
++ rbi->bi_rw = rw;
++ BUG_ON(!(rw & WRITE));
++ rbi->bi_end_io = raid5_end_write_request;
++ rbi->bi_private = sh;
++
++ pr_debug("%s: for %llu schedule op %ld on "
++ "replacement disc %d\n",
++ __func__, (unsigned long long)sh->sector,
++ rbi->bi_rw, i);
++ atomic_inc(&sh->count);
++ if (use_new_offset(conf, sh))
++ rbi->bi_iter.bi_sector = (sh->sector
++ + rrdev->new_data_offset);
++ else
++ rbi->bi_iter.bi_sector = (sh->sector
++ + rrdev->data_offset);
++ if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
++ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ sh->dev[i].rvec.bv_page = sh->dev[i].page;
++ rbi->bi_vcnt = 1;
++ rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
++ rbi->bi_io_vec[0].bv_offset = 0;
++ rbi->bi_iter.bi_size = STRIPE_SIZE;
++ /*
++			 * If this is a discard request, set bi_vcnt to 0. We don't
++			 * want to confuse SCSI, because SCSI will replace the payload.
++ */
++ if (rw & REQ_DISCARD)
++ rbi->bi_vcnt = 0;
++ if (conf->mddev->gendisk)
++ trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
++ rbi, disk_devt(conf->mddev->gendisk),
++ sh->dev[i].sector);
++ generic_make_request(rbi);
++ }
++ if (!rdev && !rrdev) {
++ if (rw & WRITE)
++ set_bit(STRIPE_DEGRADED, &sh->state);
++ pr_debug("skip op %ld on disc %d for sector %llu\n",
++ bi->bi_rw, i, (unsigned long long)sh->sector);
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ }
++}
++
++static struct dma_async_tx_descriptor *
++async_copy_data(int frombio, struct bio *bio, struct page **page,
++ sector_t sector, struct dma_async_tx_descriptor *tx,
++ struct stripe_head *sh)
++{
++ struct bio_vec bvl;
++ struct bvec_iter iter;
++ struct page *bio_page;
++ int page_offset;
++ struct async_submit_ctl submit;
++ enum async_tx_flags flags = 0;
++
++ if (bio->bi_iter.bi_sector >= sector)
++ page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
++ else
++ page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
++
++ if (frombio)
++ flags |= ASYNC_TX_FENCE;
++ init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
++
++ bio_for_each_segment(bvl, bio, iter) {
++ int len = bvl.bv_len;
++ int clen;
++ int b_offset = 0;
++
++ if (page_offset < 0) {
++ b_offset = -page_offset;
++ page_offset += b_offset;
++ len -= b_offset;
++ }
++
++ if (len > 0 && page_offset + len > STRIPE_SIZE)
++ clen = STRIPE_SIZE - page_offset;
++ else
++ clen = len;
++
++ if (clen > 0) {
++ b_offset += bvl.bv_offset;
++ bio_page = bvl.bv_page;
++ if (frombio) {
++ if (sh->raid_conf->skip_copy &&
++ b_offset == 0 && page_offset == 0 &&
++ clen == STRIPE_SIZE)
++ *page = bio_page;
++ else
++ tx = async_memcpy(*page, bio_page, page_offset,
++ b_offset, clen, &submit);
++ } else
++ tx = async_memcpy(bio_page, *page, b_offset,
++ page_offset, clen, &submit);
++ }
++ /* chain the operations */
++ submit.depend_tx = tx;
++
++ if (clen < len) /* hit end of page */
++ break;
++ page_offset += len;
++ }
++
++ return tx;
++}
++
++static void ops_complete_biofill(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++ struct bio *return_bi = NULL;
++ int i;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ /* clear completed biofills */
++ for (i = sh->disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ /* acknowledge completion of a biofill operation */
++ /* and check if we need to reply to a read request,
++ * new R5_Wantfill requests are held off until
++ * !STRIPE_BIOFILL_RUN
++ */
++ if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
++ struct bio *rbi, *rbi2;
++
++ BUG_ON(!dev->read);
++ rbi = dev->read;
++ dev->read = NULL;
++ while (rbi && rbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ rbi2 = r5_next_bio(rbi, dev->sector);
++ if (!raid5_dec_bi_active_stripes(rbi)) {
++ rbi->bi_next = return_bi;
++ return_bi = rbi;
++ }
++ rbi = rbi2;
++ }
++ }
++ }
++ clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
++
++ return_io(return_bi);
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static void ops_run_biofill(struct stripe_head *sh)
++{
++ struct dma_async_tx_descriptor *tx = NULL;
++ struct async_submit_ctl submit;
++ int i;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = sh->disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (test_bit(R5_Wantfill, &dev->flags)) {
++ struct bio *rbi;
++ spin_lock_irq(&sh->stripe_lock);
++ dev->read = rbi = dev->toread;
++ dev->toread = NULL;
++ spin_unlock_irq(&sh->stripe_lock);
++ while (rbi && rbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ tx = async_copy_data(0, rbi, &dev->page,
++ dev->sector, tx, sh);
++ rbi = r5_next_bio(rbi, dev->sector);
++ }
++ }
++ }
++
++ atomic_inc(&sh->count);
++ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
++ async_trigger_callback(&submit);
++}
++
++static void mark_target_uptodate(struct stripe_head *sh, int target)
++{
++ struct r5dev *tgt;
++
++ if (target < 0)
++ return;
++
++ tgt = &sh->dev[target];
++ set_bit(R5_UPTODATE, &tgt->flags);
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++ clear_bit(R5_Wantcompute, &tgt->flags);
++}
++
++static void ops_complete_compute(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ /* mark the computed target(s) as uptodate */
++ mark_target_uptodate(sh, sh->ops.target);
++ mark_target_uptodate(sh, sh->ops.target2);
++
++ clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ if (sh->check_state == check_state_compute_run)
++ sh->check_state = check_state_compute_result;
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++/* return a pointer to the address conversion region of the scribble buffer */
++static addr_conv_t *to_addr_conv(struct stripe_head *sh,
++ struct raid5_percpu *percpu)
++{
++ return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
++}
++
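++/*
++ * ops_run_compute5 - regenerate a single missing block on RAID4/5:
++ * the target is simply the XOR of every other device in the stripe.
++ * A single source degenerates to a plain copy.
++ */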
++static struct dma_async_tx_descriptor *
++ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int disks = sh->disks;
++ struct page **xor_srcs = percpu->scribble;
++ int target = sh->ops.target;
++ struct r5dev *tgt = &sh->dev[target];
++ struct page *xor_dest = tgt->page;
++ int count = 0;
++ struct dma_async_tx_descriptor *tx;
++ struct async_submit_ctl submit;
++ int i;
++
++ pr_debug("%s: stripe %llu block: %d\n",
++ __func__, (unsigned long long)sh->sector, target);
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++
++ for (i = disks; i--; )
++ if (i != target)
++ xor_srcs[count++] = sh->dev[i].page;
++
++ atomic_inc(&sh->count);
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
++ ops_complete_compute, sh, to_addr_conv(sh, percpu));
++ if (unlikely(count == 1))
++ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
++ else
++ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
++
++ return tx;
++}
++
++/* set_syndrome_sources - populate source buffers for gen_syndrome
++ * @srcs - (struct page *) array of size sh->disks
++ * @sh - stripe_head to parse
++ *
++ * Populates srcs in proper layout order for the stripe and returns the
++ * 'count' of sources to be used in a call to async_gen_syndrome. The P
++ * destination buffer is recorded in srcs[count] and the Q destination
++ * is recorded in srcs[count+1].
++ */
++static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
++{
++ int disks = sh->disks;
++ int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
++ int d0_idx = raid6_d0(sh);
++ int count;
++ int i;
++
++ for (i = 0; i < disks; i++)
++ srcs[i] = NULL;
++
++ count = 0;
++ i = d0_idx;
++ do {
++ int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
++
++ srcs[slot] = sh->dev[i].page;
++ i = raid6_next_disk(i, disks);
++ } while (i != d0_idx);
++
++ return syndrome_disks;
++}
++
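++/*
++ * ops_run_compute6_1 - regenerate one missing block on RAID6.  If the
++ * target is the Q disk, rerun syndrome generation with the P slot
++ * NULLed out; a missing data block or P itself is recovered by a
++ * plain XOR over the remaining blocks.
++ */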
++static struct dma_async_tx_descriptor *
++ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int disks = sh->disks;
++ struct page **blocks = percpu->scribble;
++ int target;
++ int qd_idx = sh->qd_idx;
++ struct dma_async_tx_descriptor *tx;
++ struct async_submit_ctl submit;
++ struct r5dev *tgt;
++ struct page *dest;
++ int i;
++ int count;
++
++ if (sh->ops.target < 0)
++ target = sh->ops.target2;
++ else if (sh->ops.target2 < 0)
++ target = sh->ops.target;
++ else
++ /* we should only have one valid target */
++ BUG();
++ BUG_ON(target < 0);
++ pr_debug("%s: stripe %llu block: %d\n",
++ __func__, (unsigned long long)sh->sector, target);
++
++ tgt = &sh->dev[target];
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++ dest = tgt->page;
++
++ atomic_inc(&sh->count);
++
++ if (target == qd_idx) {
++ count = set_syndrome_sources(blocks, sh);
++ blocks[count] = NULL; /* regenerating p is not necessary */
++ BUG_ON(blocks[count+1] != dest); /* q should already be set */
++ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu));
++ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
++ } else {
++ /* Compute any data- or p-drive using XOR */
++ count = 0;
++ for (i = disks; i-- ; ) {
++ if (i == target || i == qd_idx)
++ continue;
++ blocks[count++] = sh->dev[i].page;
++ }
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
++ NULL, ops_complete_compute, sh,
++ to_addr_conv(sh, percpu));
++ tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
++ }
++
++ return tx;
++}
++
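++/*
++ * ops_run_compute6_2 - regenerate two missing blocks on RAID6.  The
++ * sorted failure slots select one of four cases: P+Q (plain syndrome
++ * regeneration), D+Q (rebuild D by XOR against P, then recompute Q),
++ * D+P (async_raid6_datap_recov) or D+D (async_raid6_2data_recov).
++ */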
++static struct dma_async_tx_descriptor *
++ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int i, count, disks = sh->disks;
++ int syndrome_disks = sh->ddf_layout ? disks : disks-2;
++ int d0_idx = raid6_d0(sh);
++ int faila = -1, failb = -1;
++ int target = sh->ops.target;
++ int target2 = sh->ops.target2;
++ struct r5dev *tgt = &sh->dev[target];
++ struct r5dev *tgt2 = &sh->dev[target2];
++ struct dma_async_tx_descriptor *tx;
++ struct page **blocks = percpu->scribble;
++ struct async_submit_ctl submit;
++
++ pr_debug("%s: stripe %llu block1: %d block2: %d\n",
++ __func__, (unsigned long long)sh->sector, target, target2);
++ BUG_ON(target < 0 || target2 < 0);
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
++ BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
++
++ /* we need to open-code set_syndrome_sources to handle the
++ * slot number conversion for 'faila' and 'failb'
++ */
++ for (i = 0; i < disks ; i++)
++ blocks[i] = NULL;
++ count = 0;
++ i = d0_idx;
++ do {
++ int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
++
++ blocks[slot] = sh->dev[i].page;
++
++ if (i == target)
++ faila = slot;
++ if (i == target2)
++ failb = slot;
++ i = raid6_next_disk(i, disks);
++ } while (i != d0_idx);
++
++ BUG_ON(faila == failb);
++ if (failb < faila)
++ swap(faila, failb);
++ pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
++ __func__, (unsigned long long)sh->sector, faila, failb);
++
++ atomic_inc(&sh->count);
++
++ if (failb == syndrome_disks+1) {
++ /* Q disk is one of the missing disks */
++ if (faila == syndrome_disks) {
++ /* Missing P+Q, just recompute */
++ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu));
++ return async_gen_syndrome(blocks, 0, syndrome_disks+2,
++ STRIPE_SIZE, &submit);
++ } else {
++ struct page *dest;
++ int data_target;
++ int qd_idx = sh->qd_idx;
++
++ /* Missing D+Q: recompute D from P, then recompute Q */
++ if (target == qd_idx)
++ data_target = target2;
++ else
++ data_target = target;
++
++ count = 0;
++ for (i = disks; i-- ; ) {
++ if (i == data_target || i == qd_idx)
++ continue;
++ blocks[count++] = sh->dev[i].page;
++ }
++ dest = sh->dev[data_target].page;
++ init_async_submit(&submit,
++ ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
++ NULL, NULL, NULL,
++ to_addr_conv(sh, percpu));
++ tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
++ &submit);
++
++ count = set_syndrome_sources(blocks, sh);
++ init_async_submit(&submit, ASYNC_TX_FENCE, tx,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu));
++ return async_gen_syndrome(blocks, 0, count+2,
++ STRIPE_SIZE, &submit);
++ }
++ } else {
++ init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
++ ops_complete_compute, sh,
++ to_addr_conv(sh, percpu));
++ if (failb == syndrome_disks) {
++ /* We're missing D+P. */
++ return async_raid6_datap_recov(syndrome_disks+2,
++ STRIPE_SIZE, faila,
++ blocks, &submit);
++ } else {
++ /* We're missing D+D. */
++ return async_raid6_2data_recov(syndrome_disks+2,
++ STRIPE_SIZE, faila, failb,
++ blocks, &submit);
++ }
++ }
++}
++
++static void ops_complete_prexor(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++}
++
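++/*
++ * ops_run_prexor - first half of a read-modify-write.  XOR the blocks
++ * about to be overwritten (R5_Wantdrain) back into the old parity;
++ * since XOR is its own inverse this cancels their contribution:
++ *
++ *	P' = P ^ D_old		(here)
++ *	P_new = P' ^ D_new	(later, in ops_run_reconstruct5)
++ */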
++static struct dma_async_tx_descriptor *
++ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ int disks = sh->disks;
++ struct page **xor_srcs = percpu->scribble;
++ int count = 0, pd_idx = sh->pd_idx, i;
++ struct async_submit_ctl submit;
++
++ /* existing parity data subtracted */
++ struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ /* Only process blocks that are known to be uptodate */
++ if (test_bit(R5_Wantdrain, &dev->flags))
++ xor_srcs[count++] = dev->page;
++ }
++
++ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
++ ops_complete_prexor, sh, to_addr_conv(sh, percpu));
++ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
++
++ return tx;
++}
++
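++/*
++ * ops_run_biodrain - move queued write bios ('towrite') into the
++ * stripe cache, copying their payload into the per-device pages (or
++ * borrowing the bio page outright when skip_copy applies).
++ */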
++static struct dma_async_tx_descriptor *
++ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
++{
++ int disks = sh->disks;
++ int i;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ struct bio *chosen;
++
++ if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
++ struct bio *wbi;
++
++ spin_lock_irq(&sh->stripe_lock);
++ chosen = dev->towrite;
++ dev->towrite = NULL;
++ BUG_ON(dev->written);
++ wbi = dev->written = chosen;
++ spin_unlock_irq(&sh->stripe_lock);
++ WARN_ON(dev->page != dev->orig_page);
++
++ while (wbi && wbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ if (wbi->bi_rw & REQ_FUA)
++ set_bit(R5_WantFUA, &dev->flags);
++ if (wbi->bi_rw & REQ_SYNC)
++ set_bit(R5_SyncIO, &dev->flags);
++ if (wbi->bi_rw & REQ_DISCARD)
++ set_bit(R5_Discard, &dev->flags);
++ else {
++ tx = async_copy_data(1, wbi, &dev->page,
++ dev->sector, tx, sh);
++ if (dev->page != dev->orig_page) {
++ set_bit(R5_SkipCopy, &dev->flags);
++ clear_bit(R5_UPTODATE, &dev->flags);
++ clear_bit(R5_OVERWRITE, &dev->flags);
++ }
++ }
++ wbi = r5_next_bio(wbi, dev->sector);
++ }
++ }
++ }
++
++ return tx;
++}
++
++static void ops_complete_reconstruct(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++ int disks = sh->disks;
++ int pd_idx = sh->pd_idx;
++ int qd_idx = sh->qd_idx;
++ int i;
++ bool fua = false, sync = false, discard = false;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = disks; i--; ) {
++ fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
++ sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
++ discard |= test_bit(R5_Discard, &sh->dev[i].flags);
++ }
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ if (dev->written || i == pd_idx || i == qd_idx) {
++ if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
++ set_bit(R5_UPTODATE, &dev->flags);
++ if (fua)
++ set_bit(R5_WantFUA, &dev->flags);
++ if (sync)
++ set_bit(R5_SyncIO, &dev->flags);
++ }
++ }
++
++ if (sh->reconstruct_state == reconstruct_state_drain_run)
++ sh->reconstruct_state = reconstruct_state_drain_result;
++ else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
++ sh->reconstruct_state = reconstruct_state_prexor_drain_result;
++ else {
++ BUG_ON(sh->reconstruct_state != reconstruct_state_run);
++ sh->reconstruct_state = reconstruct_state_result;
++ }
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
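++/*
++ * ops_run_reconstruct5 - produce the new parity block.  After a
++ * prexor the partially cancelled parity is reused as a source
++ * (XOR_DROP_DST) and only the freshly written blocks are added back;
++ * otherwise parity is recomputed from scratch over all data blocks
++ * (XOR_ZERO_DST).  An all-discard stripe short-circuits straight to
++ * the completion handler.
++ */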
++static void
++ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ int disks = sh->disks;
++ struct page **xor_srcs = percpu->scribble;
++ struct async_submit_ctl submit;
++ int count = 0, pd_idx = sh->pd_idx, i;
++ struct page *xor_dest;
++ int prexor = 0;
++ unsigned long flags;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ for (i = 0; i < sh->disks; i++) {
++ if (pd_idx == i)
++ continue;
++ if (!test_bit(R5_Discard, &sh->dev[i].flags))
++ break;
++ }
++ if (i >= sh->disks) {
++ atomic_inc(&sh->count);
++ set_bit(R5_Discard, &sh->dev[pd_idx].flags);
++ ops_complete_reconstruct(sh);
++ return;
++ }
++ /* check if prexor is active which means only process blocks
++ * that are part of a read-modify-write (written)
++ */
++ if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
++ prexor = 1;
++ xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (dev->written)
++ xor_srcs[count++] = dev->page;
++ }
++ } else {
++ xor_dest = sh->dev[pd_idx].page;
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (i != pd_idx)
++ xor_srcs[count++] = dev->page;
++ }
++ }
++
++ /* 1/ if we prexor'd then the dest is reused as a source
++ * 2/ if we did not prexor then we are redoing the parity
++ * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
++ * for the synchronous xor case
++ */
++ flags = ASYNC_TX_ACK |
++ (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
++
++ atomic_inc(&sh->count);
++
++ init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
++ to_addr_conv(sh, percpu));
++ if (unlikely(count == 1))
++ tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
++ else
++ tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
++}
++
++static void
++ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
++ struct dma_async_tx_descriptor *tx)
++{
++ struct async_submit_ctl submit;
++ struct page **blocks = percpu->scribble;
++ int count, i;
++
++ pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
++
++ for (i = 0; i < sh->disks; i++) {
++ if (sh->pd_idx == i || sh->qd_idx == i)
++ continue;
++ if (!test_bit(R5_Discard, &sh->dev[i].flags))
++ break;
++ }
++ if (i >= sh->disks) {
++ atomic_inc(&sh->count);
++ set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
++ set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
++ ops_complete_reconstruct(sh);
++ return;
++ }
++
++ count = set_syndrome_sources(blocks, sh);
++
++ atomic_inc(&sh->count);
++
++ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
++ sh, to_addr_conv(sh, percpu));
++ async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
++}
++
++static void ops_complete_check(void *stripe_head_ref)
++{
++ struct stripe_head *sh = stripe_head_ref;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ sh->check_state = check_state_check_result;
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
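++/*
++ * ops_run_check_p - verify RAID4/5 parity without repairing it: XOR
++ * the parity block together with all data blocks and test that the
++ * result is zero (async_xor_val records this in ops.zero_sum_result).
++ */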
++static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
++{
++ int disks = sh->disks;
++ int pd_idx = sh->pd_idx;
++ int qd_idx = sh->qd_idx;
++ struct page *xor_dest;
++ struct page **xor_srcs = percpu->scribble;
++ struct dma_async_tx_descriptor *tx;
++ struct async_submit_ctl submit;
++ int count;
++ int i;
++
++ pr_debug("%s: stripe %llu\n", __func__,
++ (unsigned long long)sh->sector);
++
++ count = 0;
++ xor_dest = sh->dev[pd_idx].page;
++ xor_srcs[count++] = xor_dest;
++ for (i = disks; i--; ) {
++ if (i == pd_idx || i == qd_idx)
++ continue;
++ xor_srcs[count++] = sh->dev[i].page;
++ }
++
++ init_async_submit(&submit, 0, NULL, NULL, NULL,
++ to_addr_conv(sh, percpu));
++ tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
++ &sh->ops.zero_sum_result, &submit);
++
++ atomic_inc(&sh->count);
++ init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
++ tx = async_trigger_callback(&submit);
++}
++
++static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
++{
++ struct page **srcs = percpu->scribble;
++ struct async_submit_ctl submit;
++ int count;
++
++ pr_debug("%s: stripe %llu checkp: %d\n", __func__,
++ (unsigned long long)sh->sector, checkp);
++
++ count = set_syndrome_sources(srcs, sh);
++ if (!checkp)
++ srcs[count] = NULL;
++
++ atomic_inc(&sh->count);
++ init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
++ sh, to_addr_conv(sh, percpu));
++ async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
++ &sh->ops.zero_sum_result, percpu->spare_page, &submit);
++}
++
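++/*
++ * raid_run_ops - dispatch the asynchronous operations requested for
++ * this stripe: biofill runs standalone, while compute, prexor,
++ * biodrain and reconstruct are chained through 'tx' in that order,
++ * followed by an optional parity check.  Runs under get_cpu() so the
++ * per-cpu scribble buffer is not handed to another task mid-operation.
++ */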
++static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
++{
++ int overlap_clear = 0, i, disks = sh->disks;
++ struct dma_async_tx_descriptor *tx = NULL;
++ struct r5conf *conf = sh->raid_conf;
++ int level = conf->level;
++ struct raid5_percpu *percpu;
++ unsigned long cpu;
++
++ cpu = get_cpu();
++ percpu = per_cpu_ptr(conf->percpu, cpu);
++ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
++ ops_run_biofill(sh);
++ overlap_clear++;
++ }
++
++ if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
++ if (level < 6)
++ tx = ops_run_compute5(sh, percpu);
++ else {
++ if (sh->ops.target2 < 0 || sh->ops.target < 0)
++ tx = ops_run_compute6_1(sh, percpu);
++ else
++ tx = ops_run_compute6_2(sh, percpu);
++ }
++ /* terminate the chain if reconstruct is not set to be run */
++ if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
++ async_tx_ack(tx);
++ }
++
++ if (test_bit(STRIPE_OP_PREXOR, &ops_request))
++ tx = ops_run_prexor(sh, percpu, tx);
++
++ if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
++ tx = ops_run_biodrain(sh, tx);
++ overlap_clear++;
++ }
++
++ if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
++ if (level < 6)
++ ops_run_reconstruct5(sh, percpu, tx);
++ else
++ ops_run_reconstruct6(sh, percpu, tx);
++ }
++
++ if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
++ if (sh->check_state == check_state_run)
++ ops_run_check_p(sh, percpu);
++ else if (sh->check_state == check_state_run_q)
++ ops_run_check_pq(sh, percpu, 0);
++ else if (sh->check_state == check_state_run_pq)
++ ops_run_check_pq(sh, percpu, 1);
++ else
++ BUG();
++ }
++
++ if (overlap_clear)
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (test_and_clear_bit(R5_Overlap, &dev->flags))
++ wake_up(&sh->raid_conf->wait_for_overlap);
++ }
++ put_cpu();
++}
++
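++/*
++ * grow_one_stripe - allocate a stripe_head plus one page per device
++ * and park it on the inactive list for 'hash'; returns 1 on success,
++ * 0 on allocation failure.
++ */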
++static int grow_one_stripe(struct r5conf *conf, int hash)
++{
++ struct stripe_head *sh;
++ sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
++ if (!sh)
++ return 0;
++
++ sh->raid_conf = conf;
++
++ spin_lock_init(&sh->stripe_lock);
++
++ if (grow_buffers(sh)) {
++ shrink_buffers(sh);
++ kmem_cache_free(conf->slab_cache, sh);
++ return 0;
++ }
++ sh->hash_lock_index = hash;
++ /* we just created an active stripe so... */
++ atomic_set(&sh->count, 1);
++ atomic_inc(&conf->active_stripes);
++ INIT_LIST_HEAD(&sh->lru);
++ release_stripe(sh);
++ return 1;
++}
++
++static int grow_stripes(struct r5conf *conf, int num)
++{
++ struct kmem_cache *sc;
++ int devs = max(conf->raid_disks, conf->previous_raid_disks);
++ int hash;
++
++ if (conf->mddev->gendisk)
++ sprintf(conf->cache_name[0],
++ "raid%d-%s", conf->level, mdname(conf->mddev));
++ else
++ sprintf(conf->cache_name[0],
++ "raid%d-%p", conf->level, conf->mddev);
++ sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
++
++ conf->active_name = 0;
++ sc = kmem_cache_create(conf->cache_name[conf->active_name],
++ sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
++ 0, 0, NULL);
++ if (!sc)
++ return 1;
++ conf->slab_cache = sc;
++ conf->pool_size = devs;
++ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
++ while (num--) {
++ if (!grow_one_stripe(conf, hash))
++ return 1;
++ conf->max_nr_stripes++;
++ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
++ }
++ return 0;
++}
++
++/**
++ * scribble_len - return the required size of the scribble region
++ * @num - total number of disks in the array
++ *
++ * The size must be enough to contain:
++ * 1/ a struct page pointer for each device in the array +2
++ * 2/ room to convert each entry in (1) to its corresponding dma
++ * (dma_map_page()) or page (page_address()) address.
++ *
++ * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
++ * calculate over all devices (not just the data blocks), using zeros in place
++ * of the P and Q blocks.
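++ *
++ * For example, num = 8 leaves room for 10 page pointers plus 10
++ * addr_conv_t entries: 10 * (sizeof(struct page *) + sizeof(addr_conv_t)).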
++ */
++static size_t scribble_len(int num)
++{
++ size_t len;
++
++ len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
++
++ return len;
++}
++
++static int resize_stripes(struct r5conf *conf, int newsize)
++{
++ /* Make all the stripes able to hold 'newsize' devices.
++ * New slots in each stripe get 'page' set to a new page.
++ *
++ * This happens in stages:
++ * 1/ create a new kmem_cache and allocate the required number of
++ * stripe_heads.
++ * 2/ gather all the old stripe_heads and transfer the pages across
++ * to the new stripe_heads. This will have the side effect of
++ * freezing the array as once all stripe_heads have been collected,
++ * no IO will be possible. Old stripe heads are freed once their
++ * pages have been transferred over, and the old kmem_cache is
++ * freed when all stripes are done.
++	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
++	 *    we simply return a failure status - no need to clean anything up.
++	 * 4/ allocate new pages for the new slots in the new stripe_heads.
++	 *    If this fails, we don't bother trying to shrink the
++ * stripe_heads down again, we just leave them as they are.
++ * As each stripe_head is processed the new one is released into
++ * active service.
++ *
++ * Once step2 is started, we cannot afford to wait for a write,
++ * so we use GFP_NOIO allocations.
++ */
++ struct stripe_head *osh, *nsh;
++ LIST_HEAD(newstripes);
++ struct disk_info *ndisks;
++ unsigned long cpu;
++ int err;
++ struct kmem_cache *sc;
++ int i;
++ int hash, cnt;
++
++ if (newsize <= conf->pool_size)
++ return 0; /* never bother to shrink */
++
++ err = md_allow_write(conf->mddev);
++ if (err)
++ return err;
++
++ /* Step 1 */
++ sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
++ sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
++ 0, 0, NULL);
++ if (!sc)
++ return -ENOMEM;
++
++ for (i = conf->max_nr_stripes; i; i--) {
++ nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
++ if (!nsh)
++ break;
++
++ nsh->raid_conf = conf;
++ spin_lock_init(&nsh->stripe_lock);
++
++ list_add(&nsh->lru, &newstripes);
++ }
++ if (i) {
++ /* didn't get enough, give up */
++ while (!list_empty(&newstripes)) {
++ nsh = list_entry(newstripes.next, struct stripe_head, lru);
++ list_del(&nsh->lru);
++ kmem_cache_free(sc, nsh);
++ }
++ kmem_cache_destroy(sc);
++ return -ENOMEM;
++ }
++ /* Step 2 - Must use GFP_NOIO now.
++ * OK, we have enough stripes, start collecting inactive
++ * stripes and copying them over
++ */
++ hash = 0;
++ cnt = 0;
++ list_for_each_entry(nsh, &newstripes, lru) {
++ lock_device_hash_lock(conf, hash);
++ wait_event_cmd(conf->wait_for_stripe,
++ !list_empty(conf->inactive_list + hash),
++ unlock_device_hash_lock(conf, hash),
++ lock_device_hash_lock(conf, hash));
++ osh = get_free_stripe(conf, hash);
++ unlock_device_hash_lock(conf, hash);
++ atomic_set(&nsh->count, 1);
++ for(i=0; i<conf->pool_size; i++) {
++ nsh->dev[i].page = osh->dev[i].page;
++ nsh->dev[i].orig_page = osh->dev[i].page;
++ }
++ for( ; i<newsize; i++)
++ nsh->dev[i].page = NULL;
++ nsh->hash_lock_index = hash;
++ kmem_cache_free(conf->slab_cache, osh);
++ cnt++;
++ if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
++ !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
++ hash++;
++ cnt = 0;
++ }
++ }
++ kmem_cache_destroy(conf->slab_cache);
++
++ /* Step 3.
++ * At this point, we are holding all the stripes so the array
++ * is completely stalled, so now is a good time to resize
++ * conf->disks and the scribble region
++ */
++ ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
++ if (ndisks) {
++ for (i=0; i<conf->raid_disks; i++)
++ ndisks[i] = conf->disks[i];
++ kfree(conf->disks);
++ conf->disks = ndisks;
++ } else
++ err = -ENOMEM;
++
++ get_online_cpus();
++ conf->scribble_len = scribble_len(newsize);
++ for_each_present_cpu(cpu) {
++ struct raid5_percpu *percpu;
++ void *scribble;
++
++ percpu = per_cpu_ptr(conf->percpu, cpu);
++ scribble = kmalloc(conf->scribble_len, GFP_NOIO);
++
++ if (scribble) {
++ kfree(percpu->scribble);
++ percpu->scribble = scribble;
++ } else {
++ err = -ENOMEM;
++ break;
++ }
++ }
++ put_online_cpus();
++
++ /* Step 4, return new stripes to service */
++ while(!list_empty(&newstripes)) {
++ nsh = list_entry(newstripes.next, struct stripe_head, lru);
++ list_del_init(&nsh->lru);
++
++ for (i=conf->raid_disks; i < newsize; i++)
++ if (nsh->dev[i].page == NULL) {
++ struct page *p = alloc_page(GFP_NOIO);
++ nsh->dev[i].page = p;
++ nsh->dev[i].orig_page = p;
++ if (!p)
++ err = -ENOMEM;
++ }
++ release_stripe(nsh);
++ }
++	/* critical section passed, GFP_NOIO no longer needed */
++
++ conf->slab_cache = sc;
++ conf->active_name = 1-conf->active_name;
++ conf->pool_size = newsize;
++ return err;
++}
++
++static int drop_one_stripe(struct r5conf *conf, int hash)
++{
++ struct stripe_head *sh;
++
++ spin_lock_irq(conf->hash_locks + hash);
++ sh = get_free_stripe(conf, hash);
++ spin_unlock_irq(conf->hash_locks + hash);
++ if (!sh)
++ return 0;
++ BUG_ON(atomic_read(&sh->count));
++ shrink_buffers(sh);
++ kmem_cache_free(conf->slab_cache, sh);
++ atomic_dec(&conf->active_stripes);
++ return 1;
++}
++
++static void shrink_stripes(struct r5conf *conf)
++{
++ int hash;
++ for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
++ while (drop_one_stripe(conf, hash))
++ ;
++
++ if (conf->slab_cache)
++ kmem_cache_destroy(conf->slab_cache);
++ conf->slab_cache = NULL;
++}
++
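++/*
++ * raid5_end_read_request - completion handler for a per-device read.
++ * Success clears any prior read-error state; failure escalates from
++ * an unmerged retry (R5_ReadNoMerge) to rewrite-and-reread
++ * (R5_ReadError) to failing the device via md_error().
++ */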
++static void raid5_end_read_request(struct bio *bi, int error)
++{
++ struct stripe_head *sh = bi->bi_private;
++ struct r5conf *conf = sh->raid_conf;
++ int disks = sh->disks, i;
++ int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
++ char b[BDEVNAME_SIZE];
++ struct md_rdev *rdev = NULL;
++ sector_t s;
++
++ for (i=0 ; i<disks; i++)
++ if (bi == &sh->dev[i].req)
++ break;
++
++ pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
++ (unsigned long long)sh->sector, i, atomic_read(&sh->count),
++ uptodate);
++ if (i == disks) {
++ BUG();
++ return;
++ }
++ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
++ /* If replacement finished while this request was outstanding,
++ * 'replacement' might be NULL already.
++ * In that case it moved down to 'rdev'.
++ * rdev is not removed until all requests are finished.
++ */
++ rdev = conf->disks[i].replacement;
++ if (!rdev)
++ rdev = conf->disks[i].rdev;
++
++ if (use_new_offset(conf, sh))
++ s = sh->sector + rdev->new_data_offset;
++ else
++ s = sh->sector + rdev->data_offset;
++ if (uptodate) {
++ set_bit(R5_UPTODATE, &sh->dev[i].flags);
++ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
++ /* Note that this cannot happen on a
++ * replacement device. We just fail those on
++ * any error
++ */
++ printk_ratelimited(
++ KERN_INFO
++ "md/raid:%s: read error corrected"
++ " (%lu sectors at %llu on %s)\n",
++ mdname(conf->mddev), STRIPE_SECTORS,
++ (unsigned long long)s,
++ bdevname(rdev->bdev, b));
++ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
++ clear_bit(R5_ReadError, &sh->dev[i].flags);
++ clear_bit(R5_ReWrite, &sh->dev[i].flags);
++ } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
++ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
++
++ if (atomic_read(&rdev->read_errors))
++ atomic_set(&rdev->read_errors, 0);
++ } else {
++ const char *bdn = bdevname(rdev->bdev, b);
++ int retry = 0;
++ int set_bad = 0;
++
++ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
++ atomic_inc(&rdev->read_errors);
++ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
++ printk_ratelimited(
++ KERN_WARNING
++ "md/raid:%s: read error on replacement device "
++ "(sector %llu on %s).\n",
++ mdname(conf->mddev),
++ (unsigned long long)s,
++ bdn);
++ else if (conf->mddev->degraded >= conf->max_degraded) {
++ set_bad = 1;
++ printk_ratelimited(
++ KERN_WARNING
++ "md/raid:%s: read error not correctable "
++ "(sector %llu on %s).\n",
++ mdname(conf->mddev),
++ (unsigned long long)s,
++ bdn);
++ } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
++ /* Oh, no!!! */
++ set_bad = 1;
++ printk_ratelimited(
++ KERN_WARNING
++ "md/raid:%s: read error NOT corrected!! "
++ "(sector %llu on %s).\n",
++ mdname(conf->mddev),
++ (unsigned long long)s,
++ bdn);
++ } else if (atomic_read(&rdev->read_errors)
++ > conf->max_nr_stripes)
++ printk(KERN_WARNING
++ "md/raid:%s: Too many read errors, failing device %s.\n",
++ mdname(conf->mddev), bdn);
++ else
++ retry = 1;
++ if (set_bad && test_bit(In_sync, &rdev->flags)
++ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
++ retry = 1;
++ if (retry)
++ if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
++ set_bit(R5_ReadError, &sh->dev[i].flags);
++ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
++ } else
++ set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
++ else {
++ clear_bit(R5_ReadError, &sh->dev[i].flags);
++ clear_bit(R5_ReWrite, &sh->dev[i].flags);
++ if (!(set_bad
++ && test_bit(In_sync, &rdev->flags)
++ && rdev_set_badblocks(
++ rdev, sh->sector, STRIPE_SECTORS, 0)))
++ md_error(conf->mddev, rdev);
++ }
++ }
++ rdev_dec_pending(rdev, conf->mddev);
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static void raid5_end_write_request(struct bio *bi, int error)
++{
++ struct stripe_head *sh = bi->bi_private;
++ struct r5conf *conf = sh->raid_conf;
++ int disks = sh->disks, i;
++ struct md_rdev *uninitialized_var(rdev);
++ int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
++ sector_t first_bad;
++ int bad_sectors;
++ int replacement = 0;
++
++ for (i = 0 ; i < disks; i++) {
++ if (bi == &sh->dev[i].req) {
++ rdev = conf->disks[i].rdev;
++ break;
++ }
++ if (bi == &sh->dev[i].rreq) {
++ rdev = conf->disks[i].replacement;
++ if (rdev)
++ replacement = 1;
++ else
++ /* rdev was removed and 'replacement'
++ * replaced it. rdev is not removed
++ * until all requests are finished.
++ */
++ rdev = conf->disks[i].rdev;
++ break;
++ }
++ }
++ pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
++ (unsigned long long)sh->sector, i, atomic_read(&sh->count),
++ uptodate);
++ if (i == disks) {
++ BUG();
++ return;
++ }
++
++ if (replacement) {
++ if (!uptodate)
++ md_error(conf->mddev, rdev);
++ else if (is_badblock(rdev, sh->sector,
++ STRIPE_SECTORS,
++ &first_bad, &bad_sectors))
++ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
++ } else {
++ if (!uptodate) {
++ set_bit(STRIPE_DEGRADED, &sh->state);
++ set_bit(WriteErrorSeen, &rdev->flags);
++ set_bit(R5_WriteError, &sh->dev[i].flags);
++ if (!test_and_set_bit(WantReplacement, &rdev->flags))
++ set_bit(MD_RECOVERY_NEEDED,
++ &rdev->mddev->recovery);
++ } else if (is_badblock(rdev, sh->sector,
++ STRIPE_SECTORS,
++ &first_bad, &bad_sectors)) {
++ set_bit(R5_MadeGood, &sh->dev[i].flags);
++ if (test_bit(R5_ReadError, &sh->dev[i].flags))
++ /* That was a successful write so make
++ * sure it looks like we already did
++ * a re-write.
++ */
++ set_bit(R5_ReWrite, &sh->dev[i].flags);
++ }
++ }
++ rdev_dec_pending(rdev, conf->mddev);
++
++ if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++}
++
++static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
++
++static void raid5_build_block(struct stripe_head *sh, int i, int previous)
++{
++ struct r5dev *dev = &sh->dev[i];
++
++ bio_init(&dev->req);
++ dev->req.bi_io_vec = &dev->vec;
++ dev->req.bi_max_vecs = 1;
++ dev->req.bi_private = sh;
++
++ bio_init(&dev->rreq);
++ dev->rreq.bi_io_vec = &dev->rvec;
++ dev->rreq.bi_max_vecs = 1;
++ dev->rreq.bi_private = sh;
++
++ dev->flags = 0;
++ dev->sector = compute_blocknr(sh, i, previous);
++}
++
++static void error(struct mddev *mddev, struct md_rdev *rdev)
++{
++ char b[BDEVNAME_SIZE];
++ struct r5conf *conf = mddev->private;
++ unsigned long flags;
++ pr_debug("raid456: error called\n");
++
++ spin_lock_irqsave(&conf->device_lock, flags);
++ clear_bit(In_sync, &rdev->flags);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
++
++ set_bit(Blocked, &rdev->flags);
++ set_bit(Faulty, &rdev->flags);
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ printk(KERN_ALERT
++ "md/raid:%s: Disk failure on %s, disabling device.\n"
++ "md/raid:%s: Operation continuing on %d devices.\n",
++ mdname(mddev),
++ bdevname(rdev->bdev, b),
++ mdname(mddev),
++ conf->raid_disks - mddev->degraded);
++}
++
++/*
++ * Input: a 'big' sector number,
++ * Output: index of the data and parity disk, and the sector # in them.
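++ *
++ * Worked example: raid_disks = 5, level 5, ALGORITHM_LEFT_SYMMETRIC,
++ * 64-sector chunks, r_sector = 1000: chunk 15 at offset 40 gives
++ * stripe 3, dd_idx 3; then pd_idx = 4 - (3 % 5) = 1, dd_idx becomes
++ * (1 + 1 + 3) % 5 = 0, and the returned sector is 3 * 64 + 40 = 232.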
++ */
++static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
++ int previous, int *dd_idx,
++ struct stripe_head *sh)
++{
++ sector_t stripe, stripe2;
++ sector_t chunk_number;
++ unsigned int chunk_offset;
++ int pd_idx, qd_idx;
++ int ddf_layout = 0;
++ sector_t new_sector;
++ int algorithm = previous ? conf->prev_algo
++ : conf->algorithm;
++ int sectors_per_chunk = previous ? conf->prev_chunk_sectors
++ : conf->chunk_sectors;
++ int raid_disks = previous ? conf->previous_raid_disks
++ : conf->raid_disks;
++ int data_disks = raid_disks - conf->max_degraded;
++
++ /* First compute the information on this sector */
++
++ /*
++ * Compute the chunk number and the sector offset inside the chunk
++ */
++ chunk_offset = sector_div(r_sector, sectors_per_chunk);
++ chunk_number = r_sector;
++
++ /*
++ * Compute the stripe number
++ */
++ stripe = chunk_number;
++ *dd_idx = sector_div(stripe, data_disks);
++ stripe2 = stripe;
++ /*
++ * Select the parity disk based on the user selected algorithm.
++ */
++ pd_idx = qd_idx = -1;
++ switch(conf->level) {
++ case 4:
++ pd_idx = data_disks;
++ break;
++ case 5:
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ pd_idx = data_disks - sector_div(stripe2, raid_disks);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ pd_idx = data_disks - sector_div(stripe2, raid_disks);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
++ break;
++ case ALGORITHM_PARITY_0:
++ pd_idx = 0;
++ (*dd_idx)++;
++ break;
++ case ALGORITHM_PARITY_N:
++ pd_idx = data_disks;
++ break;
++ default:
++ BUG();
++ }
++ break;
++ case 6:
++
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = (pd_idx + 1) % raid_disks;
++ *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ pd_idx = sector_div(stripe2, raid_disks);
++ qd_idx = (pd_idx + 1) % raid_disks;
++ *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
++ break;
++
++ case ALGORITHM_PARITY_0:
++ pd_idx = 0;
++ qd_idx = 1;
++ (*dd_idx) += 2;
++ break;
++ case ALGORITHM_PARITY_N:
++ pd_idx = data_disks;
++ qd_idx = data_disks + 1;
++ break;
++
++ case ALGORITHM_ROTATING_ZERO_RESTART:
++			/* Exactly the same as RIGHT_ASYMMETRIC, but order
++			 * of blocks for computing Q is different.
++ */
++ pd_idx = sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ ddf_layout = 1;
++ break;
++
++ case ALGORITHM_ROTATING_N_RESTART:
++			/* Same as left_asymmetric, but first stripe is
++ * D D D P Q rather than
++ * Q D D D P
++ */
++ stripe2 += 1;
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = pd_idx + 1;
++ if (pd_idx == raid_disks-1) {
++ (*dd_idx)++; /* Q D D D P */
++ qd_idx = 0;
++ } else if (*dd_idx >= pd_idx)
++ (*dd_idx) += 2; /* D D P Q D */
++ ddf_layout = 1;
++ break;
++
++ case ALGORITHM_ROTATING_N_CONTINUE:
++ /* Same as left_symmetric but Q is before P */
++ pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
++ qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
++ *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
++ ddf_layout = 1;
++ break;
++
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ /* RAID5 left_asymmetric, with Q on last device */
++ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ pd_idx = sector_div(stripe2, raid_disks-1);
++ if (*dd_idx >= pd_idx)
++ (*dd_idx)++;
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ pd_idx = sector_div(stripe2, raid_disks-1);
++ *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
++ qd_idx = raid_disks - 1;
++ break;
++
++ case ALGORITHM_PARITY_0_6:
++ pd_idx = 0;
++ (*dd_idx)++;
++ qd_idx = raid_disks - 1;
++ break;
++
++ default:
++ BUG();
++ }
++ break;
++ }
++
++ if (sh) {
++ sh->pd_idx = pd_idx;
++ sh->qd_idx = qd_idx;
++ sh->ddf_layout = ddf_layout;
++ }
++ /*
++ * Finally, compute the new sector number
++ */
++ new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
++ return new_sector;
++}
++
++static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int raid_disks = sh->disks;
++ int data_disks = raid_disks - conf->max_degraded;
++ sector_t new_sector = sh->sector, check;
++ int sectors_per_chunk = previous ? conf->prev_chunk_sectors
++ : conf->chunk_sectors;
++ int algorithm = previous ? conf->prev_algo
++ : conf->algorithm;
++ sector_t stripe;
++ int chunk_offset;
++ sector_t chunk_number;
++ int dummy1, dd_idx = i;
++ sector_t r_sector;
++ struct stripe_head sh2;
++
++ chunk_offset = sector_div(new_sector, sectors_per_chunk);
++ stripe = new_sector;
++
++ if (i == sh->pd_idx)
++ return 0;
++ switch(conf->level) {
++ case 4: break;
++ case 5:
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ if (i > sh->pd_idx)
++ i--;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ if (i < sh->pd_idx)
++ i += raid_disks;
++ i -= (sh->pd_idx + 1);
++ break;
++ case ALGORITHM_PARITY_0:
++ i -= 1;
++ break;
++ case ALGORITHM_PARITY_N:
++ break;
++ default:
++ BUG();
++ }
++ break;
++ case 6:
++ if (i == sh->qd_idx)
++ return 0; /* It is the Q disk */
++ switch (algorithm) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ case ALGORITHM_ROTATING_ZERO_RESTART:
++ case ALGORITHM_ROTATING_N_RESTART:
++ if (sh->pd_idx == raid_disks-1)
++ i--; /* Q D D D P */
++ else if (i > sh->pd_idx)
++ i -= 2; /* D D P Q D */
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ if (sh->pd_idx == raid_disks-1)
++ i--; /* Q D D D P */
++ else {
++ /* D D P Q D */
++ if (i < sh->pd_idx)
++ i += raid_disks;
++ i -= (sh->pd_idx + 2);
++ }
++ break;
++ case ALGORITHM_PARITY_0:
++ i -= 2;
++ break;
++ case ALGORITHM_PARITY_N:
++ break;
++ case ALGORITHM_ROTATING_N_CONTINUE:
++ /* Like left_symmetric, but P is before Q */
++ if (sh->pd_idx == 0)
++ i--; /* P D D D Q */
++ else {
++ /* D D Q P D */
++ if (i < sh->pd_idx)
++ i += raid_disks;
++ i -= (sh->pd_idx + 1);
++ }
++ break;
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ if (i > sh->pd_idx)
++ i--;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ if (i < sh->pd_idx)
++ i += data_disks + 1;
++ i -= (sh->pd_idx + 1);
++ break;
++ case ALGORITHM_PARITY_0_6:
++ i -= 1;
++ break;
++ default:
++ BUG();
++ }
++ break;
++ }
++
++ chunk_number = stripe * data_disks + i;
++ r_sector = chunk_number * sectors_per_chunk + chunk_offset;
++
++ check = raid5_compute_sector(conf, r_sector,
++ previous, &dummy1, &sh2);
++ if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
++ || sh2.qd_idx != sh->qd_idx) {
++ printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
++ mdname(conf->mddev));
++ return 0;
++ }
++ return r_sector;
++}
++
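++/*
++ * schedule_reconstruction - lock the devices involved in a parity
++ * update and record which ops raid_run_ops() must run.  rcw drains
++ * all pending writes and recomputes parity in full; the rmw path
++ * (RAID5 only) adds a PREXOR pass over the blocks being rewritten.
++ */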
++static void
++schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
++ int rcw, int expand)
++{
++ int i, pd_idx = sh->pd_idx, disks = sh->disks;
++ struct r5conf *conf = sh->raid_conf;
++ int level = conf->level;
++
++ if (rcw) {
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++
++ if (dev->towrite) {
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantdrain, &dev->flags);
++ if (!expand)
++ clear_bit(R5_UPTODATE, &dev->flags);
++ s->locked++;
++ }
++ }
++ /* if we are not expanding this is a proper write request, and
++ * there will be bios with new data to be drained into the
++ * stripe cache
++ */
++ if (!expand) {
++ if (!s->locked)
++ /* False alarm, nothing to do */
++ return;
++ sh->reconstruct_state = reconstruct_state_drain_run;
++ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
++ } else
++ sh->reconstruct_state = reconstruct_state_run;
++
++ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
++
++ if (s->locked + conf->max_degraded == disks)
++ if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
++ atomic_inc(&conf->pending_full_writes);
++ } else {
++ BUG_ON(level == 6);
++ BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
++ test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
++
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (i == pd_idx)
++ continue;
++
++ if (dev->towrite &&
++ (test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ set_bit(R5_Wantdrain, &dev->flags);
++ set_bit(R5_LOCKED, &dev->flags);
++ clear_bit(R5_UPTODATE, &dev->flags);
++ s->locked++;
++ }
++ }
++ if (!s->locked)
++ /* False alarm - nothing to do */
++ return;
++ sh->reconstruct_state = reconstruct_state_prexor_drain_run;
++ set_bit(STRIPE_OP_PREXOR, &s->ops_request);
++ set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
++ set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
++ }
++
++ /* keep the parity disk(s) locked while asynchronous operations
++ * are in flight
++ */
++ set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
++ clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
++ s->locked++;
++
++ if (level == 6) {
++ int qd_idx = sh->qd_idx;
++ struct r5dev *dev = &sh->dev[qd_idx];
++
++ set_bit(R5_LOCKED, &dev->flags);
++ clear_bit(R5_UPTODATE, &dev->flags);
++ s->locked++;
++ }
++
++ pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
++ __func__, (unsigned long long)sh->sector,
++ s->locked, s->ops_request);
++}
++
++/*
++ * Each stripe/dev can have one or more bios attached.
++ * toread/towrite point to the first in a chain.
++ * The bi_next chain must be in order.
++ */
++static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
++{
++ struct bio **bip;
++ struct r5conf *conf = sh->raid_conf;
++ int firstwrite=0;
++
++ pr_debug("adding bi b#%llu to stripe s#%llu\n",
++ (unsigned long long)bi->bi_iter.bi_sector,
++ (unsigned long long)sh->sector);
++
++ /*
++	 * If several bios share a stripe, the bio bi_phys_segments field acts
++	 * as a reference count to avoid races. The reference count should
++	 * already be increased before this function is called (for example,
++	 * in make_request()), so other bios sharing this stripe will not free
++	 * the stripe. If a stripe is owned by a single bio, the stripe lock
++	 * will protect it.
++ */
++ spin_lock_irq(&sh->stripe_lock);
++ if (forwrite) {
++ bip = &sh->dev[dd_idx].towrite;
++ if (*bip == NULL)
++ firstwrite = 1;
++ } else
++ bip = &sh->dev[dd_idx].toread;
++ while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
++ if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
++ goto overlap;
++ bip = & (*bip)->bi_next;
++ }
++ if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
++ goto overlap;
++
++ BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
++ if (*bip)
++ bi->bi_next = *bip;
++ *bip = bi;
++ raid5_inc_bi_active_stripes(bi);
++
++ if (forwrite) {
++ /* check if page is covered */
++ sector_t sector = sh->dev[dd_idx].sector;
++ for (bi=sh->dev[dd_idx].towrite;
++ sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
++ bi && bi->bi_iter.bi_sector <= sector;
++ bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
++ if (bio_end_sector(bi) >= sector)
++ sector = bio_end_sector(bi);
++ }
++ if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
++ set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
++ }
++
++ pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
++ (unsigned long long)(*bip)->bi_iter.bi_sector,
++ (unsigned long long)sh->sector, dd_idx);
++ spin_unlock_irq(&sh->stripe_lock);
++
++ if (conf->mddev->bitmap && firstwrite) {
++ bitmap_startwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS, 0);
++ sh->bm_seq = conf->seq_flush+1;
++ set_bit(STRIPE_BIT_DELAY, &sh->state);
++ }
++ return 1;
++
++ overlap:
++ set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
++ spin_unlock_irq(&sh->stripe_lock);
++ return 0;
++}
++
++static void end_reshape(struct r5conf *conf);
++
++static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
++ struct stripe_head *sh)
++{
++ int sectors_per_chunk =
++ previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
++ int dd_idx;
++ int chunk_offset = sector_div(stripe, sectors_per_chunk);
++ int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
++
++ raid5_compute_sector(conf,
++ stripe * (disks - conf->max_degraded)
++ *sectors_per_chunk + chunk_offset,
++ previous,
++ &dd_idx, sh);
++}
++
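++/*
++ * handle_failed_stripe - the stripe has more failures than can be
++ * recovered: fail every queued write (both 'towrite' and 'written')
++ * and any reads that can no longer be served, returning each bio with
++ * BIO_UPTODATE cleared.  Bad blocks are recorded on devices that saw
++ * read errors, failing the device if that is not possible.
++ */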
++static void
++handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s, int disks,
++ struct bio **return_bi)
++{
++ int i;
++ for (i = disks; i--; ) {
++ struct bio *bi;
++ int bitmap_end = 0;
++
++ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
++ struct md_rdev *rdev;
++ rcu_read_lock();
++ rdev = rcu_dereference(conf->disks[i].rdev);
++ if (rdev && test_bit(In_sync, &rdev->flags))
++ atomic_inc(&rdev->nr_pending);
++ else
++ rdev = NULL;
++ rcu_read_unlock();
++ if (rdev) {
++ if (!rdev_set_badblocks(
++ rdev,
++ sh->sector,
++ STRIPE_SECTORS, 0))
++ md_error(conf->mddev, rdev);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ }
++ spin_lock_irq(&sh->stripe_lock);
++ /* fail all writes first */
++ bi = sh->dev[i].towrite;
++ sh->dev[i].towrite = NULL;
++ spin_unlock_irq(&sh->stripe_lock);
++ if (bi)
++ bitmap_end = 1;
++
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
++ wake_up(&conf->wait_for_overlap);
++
++ while (bi && bi->bi_iter.bi_sector <
++ sh->dev[i].sector + STRIPE_SECTORS) {
++ struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ if (!raid5_dec_bi_active_stripes(bi)) {
++ md_write_end(conf->mddev);
++ bi->bi_next = *return_bi;
++ *return_bi = bi;
++ }
++ bi = nextbi;
++ }
++ if (bitmap_end)
++ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS, 0, 0);
++ bitmap_end = 0;
++ /* and fail all 'written' */
++ bi = sh->dev[i].written;
++ sh->dev[i].written = NULL;
++ if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) {
++ WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ sh->dev[i].page = sh->dev[i].orig_page;
++ }
++
++ if (bi) bitmap_end = 1;
++ while (bi && bi->bi_iter.bi_sector <
++ sh->dev[i].sector + STRIPE_SECTORS) {
++ struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ if (!raid5_dec_bi_active_stripes(bi)) {
++ md_write_end(conf->mddev);
++ bi->bi_next = *return_bi;
++ *return_bi = bi;
++ }
++ bi = bi2;
++ }
++
++ /* fail any reads if this device is non-operational and
++ * the data has not reached the cache yet.
++ */
++ if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
++ (!test_bit(R5_Insync, &sh->dev[i].flags) ||
++ test_bit(R5_ReadError, &sh->dev[i].flags))) {
++ spin_lock_irq(&sh->stripe_lock);
++ bi = sh->dev[i].toread;
++ sh->dev[i].toread = NULL;
++ spin_unlock_irq(&sh->stripe_lock);
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
++ wake_up(&conf->wait_for_overlap);
++ while (bi && bi->bi_iter.bi_sector <
++ sh->dev[i].sector + STRIPE_SECTORS) {
++ struct bio *nextbi =
++ r5_next_bio(bi, sh->dev[i].sector);
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ if (!raid5_dec_bi_active_stripes(bi)) {
++ bi->bi_next = *return_bi;
++ *return_bi = bi;
++ }
++ bi = nextbi;
++ }
++ }
++ if (bitmap_end)
++ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS, 0, 0);
++ /* If we were in the middle of a write the parity block might
++ * still be locked - so just clear all R5_LOCKED flags
++ */
++ clear_bit(R5_LOCKED, &sh->dev[i].flags);
++ }
++
++ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
++ if (atomic_dec_and_test(&conf->pending_full_writes))
++ md_wakeup_thread(conf->mddev->thread);
++}
++
++static void
++handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s)
++{
++ int abort = 0;
++ int i;
++
++ clear_bit(STRIPE_SYNCING, &sh->state);
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
++ wake_up(&conf->wait_for_overlap);
++ s->syncing = 0;
++ s->replacing = 0;
++ /* There is nothing more to do for sync/check/repair.
++ * Don't even need to abort as that is handled elsewhere
++ * if needed, and not always wanted e.g. if there is a known
++ * bad block here.
++ * For recover/replace we need to record a bad block on all
++ * non-sync devices, or abort the recovery
++ */
++ if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
++ /* During recovery devices cannot be removed, so
++ * locking and refcounting of rdevs is not needed
++ */
++ for (i = 0; i < conf->raid_disks; i++) {
++ struct md_rdev *rdev = conf->disks[i].rdev;
++ if (rdev
++ && !test_bit(Faulty, &rdev->flags)
++ && !test_bit(In_sync, &rdev->flags)
++ && !rdev_set_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0))
++ abort = 1;
++ rdev = conf->disks[i].replacement;
++ if (rdev
++ && !test_bit(Faulty, &rdev->flags)
++ && !test_bit(In_sync, &rdev->flags)
++ && !rdev_set_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0))
++ abort = 1;
++ }
++ if (abort)
++ conf->recovery_disabled =
++ conf->mddev->recovery_disabled;
++ }
++ md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
++}
++
++static int want_replace(struct stripe_head *sh, int disk_idx)
++{
++ struct md_rdev *rdev;
++ int rv = 0;
++ /* Doing recovery so rcu locking not required */
++ rdev = sh->raid_conf->disks[disk_idx].replacement;
++ if (rdev
++ && !test_bit(Faulty, &rdev->flags)
++ && !test_bit(In_sync, &rdev->flags)
++ && (rdev->recovery_offset <= sh->sector
++ || rdev->mddev->recovery_cp <= sh->sector))
++ rv = 1;
++
++ return rv;
++}
++
++/* fetch_block - checks the given member device to see if its data needs
++ * to be read or computed to satisfy a request.
++ *
++ * Returns 1 when no more member devices need to be checked, otherwise returns
++ * 0 to tell the loop in handle_stripe_fill to continue
++ */
++static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
++ int disk_idx, int disks)
++{
++ struct r5dev *dev = &sh->dev[disk_idx];
++ struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
++ &sh->dev[s->failed_num[1]] };
++
++ /* is the data in this block needed, and can we get it? */
++ if (!test_bit(R5_LOCKED, &dev->flags) &&
++ !test_bit(R5_UPTODATE, &dev->flags) &&
++ (dev->toread ||
++ (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
++ s->syncing || s->expanding ||
++ (s->replacing && want_replace(sh, disk_idx)) ||
++ (s->failed >= 1 && fdev[0]->toread) ||
++ (s->failed >= 2 && fdev[1]->toread) ||
++ (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
++ (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
++ !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
++ ((sh->raid_conf->level == 6 ||
++ sh->sector >= sh->raid_conf->mddev->recovery_cp)
++ && s->failed && s->to_write &&
++ (s->to_write - s->non_overwrite <
++ sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) &&
++ (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
++ /* we would like to get this block, possibly by computing it,
++ * otherwise read it if the backing disk is insync
++ */
++ BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
++ BUG_ON(test_bit(R5_Wantread, &dev->flags));
++ if ((s->uptodate == disks - 1) &&
++ (s->failed && (disk_idx == s->failed_num[0] ||
++ disk_idx == s->failed_num[1]))) {
++			/* the disk has failed and we've been asked to fetch it;
++			 * so compute it
++ */
++ pr_debug("Computing stripe %llu block %d\n",
++ (unsigned long long)sh->sector, disk_idx);
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ set_bit(R5_Wantcompute, &dev->flags);
++ sh->ops.target = disk_idx;
++ sh->ops.target2 = -1; /* no 2nd target */
++ s->req_compute = 1;
++ /* Careful: from this point on 'uptodate' is in the eye
++ * of raid_run_ops which services 'compute' operations
++ * before writes. R5_Wantcompute flags a block that will
++ * be R5_UPTODATE by the time it is needed for a
++ * subsequent operation.
++ */
++ s->uptodate++;
++ return 1;
++ } else if (s->uptodate == disks-2 && s->failed >= 2) {
++ /* Computing 2-failure is *very* expensive; only
++ * do it if failed >= 2
++ */
++ int other;
++ for (other = disks; other--; ) {
++ if (other == disk_idx)
++ continue;
++ if (!test_bit(R5_UPTODATE,
++ &sh->dev[other].flags))
++ break;
++ }
++ BUG_ON(other < 0);
++ pr_debug("Computing stripe %llu blocks %d,%d\n",
++ (unsigned long long)sh->sector,
++ disk_idx, other);
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
++ set_bit(R5_Wantcompute, &sh->dev[other].flags);
++ sh->ops.target = disk_idx;
++ sh->ops.target2 = other;
++ s->uptodate += 2;
++ s->req_compute = 1;
++ return 1;
++ } else if (test_bit(R5_Insync, &dev->flags)) {
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantread, &dev->flags);
++ s->locked++;
++ pr_debug("Reading block %d (sync=%d)\n",
++ disk_idx, s->syncing);
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * handle_stripe_fill - read or compute data to satisfy pending requests.
++ */
++static void handle_stripe_fill(struct stripe_head *sh,
++ struct stripe_head_state *s,
++ int disks)
++{
++ int i;
++
++ /* look for blocks to read/compute, skip this if a compute
++ * is already in flight, or if the stripe contents are in the
++ * midst of changing due to a write
++ */
++ if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
++ !sh->reconstruct_state)
++ for (i = disks; i--; )
++ if (fetch_block(sh, s, i, disks))
++ break;
++ set_bit(STRIPE_HANDLE, &sh->state);
++}
++
++/* handle_stripe_clean_event
++ * any written block on an uptodate or failed drive can be returned.
++ * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
++ * never LOCKED, so we don't need to test 'failed' directly.
++ */
++static void handle_stripe_clean_event(struct r5conf *conf,
++ struct stripe_head *sh, int disks, struct bio **return_bi)
++{
++ int i;
++ struct r5dev *dev;
++ int discard_pending = 0;
++
++ for (i = disks; i--; )
++ if (sh->dev[i].written) {
++ dev = &sh->dev[i];
++ if (!test_bit(R5_LOCKED, &dev->flags) &&
++ (test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Discard, &dev->flags) ||
++ test_bit(R5_SkipCopy, &dev->flags))) {
++ /* We can return any write requests */
++ struct bio *wbi, *wbi2;
++ pr_debug("Return write for disc %d\n", i);
++ if (test_and_clear_bit(R5_Discard, &dev->flags))
++ clear_bit(R5_UPTODATE, &dev->flags);
++ if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
++ WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
++ dev->page = dev->orig_page;
++ }
++ wbi = dev->written;
++ dev->written = NULL;
++ while (wbi && wbi->bi_iter.bi_sector <
++ dev->sector + STRIPE_SECTORS) {
++ wbi2 = r5_next_bio(wbi, dev->sector);
++ if (!raid5_dec_bi_active_stripes(wbi)) {
++ md_write_end(conf->mddev);
++ wbi->bi_next = *return_bi;
++ *return_bi = wbi;
++ }
++ wbi = wbi2;
++ }
++ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
++ STRIPE_SECTORS,
++ !test_bit(STRIPE_DEGRADED, &sh->state),
++ 0);
++ } else if (test_bit(R5_Discard, &dev->flags))
++ discard_pending = 1;
++ WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
++ WARN_ON(dev->page != dev->orig_page);
++ }
++ if (!discard_pending &&
++ test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
++ clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
++ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
++ if (sh->qd_idx >= 0) {
++ clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
++ clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
++ }
++ /* now that discard is done we can proceed with any sync */
++ clear_bit(STRIPE_DISCARD, &sh->state);
++ /*
++ * SCSI discard will change some bio fields and the stripe has
++ * no updated data, so remove it from hash list and the stripe
++ * will be reinitialized
++ */
++ spin_lock_irq(&conf->device_lock);
++ remove_hash(sh);
++ spin_unlock_irq(&conf->device_lock);
++ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ }
++
++ if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
++ if (atomic_dec_and_test(&conf->pending_full_writes))
++ md_wakeup_thread(conf->mddev->thread);
++}
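++
++/* Background for the R5_SkipCopy handling above: with skip-copy the
++ * bio's own pages were attached to the stripe instead of being copied,
++ * so dev->page is restored to dev->orig_page before the write is
++ * returned (such a block is deliberately not R5_UPTODATE).
++ */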
++
++static void handle_stripe_dirtying(struct r5conf *conf,
++ struct stripe_head *sh,
++ struct stripe_head_state *s,
++ int disks)
++{
++ int rmw = 0, rcw = 0, i;
++ sector_t recovery_cp = conf->mddev->recovery_cp;
++
++ /* RAID6 requires 'rcw' in current implementation.
++ * Otherwise, check whether resync is now happening or should start.
++ * If yes, then the array is dirty (after unclean shutdown or
++ * initial creation), so parity in some stripes might be inconsistent.
++ * In this case, we need to always do reconstruct-write, to ensure
++ * that in case of drive failure or read-error correction, we
++ * generate correct data from the parity.
++ */
++ if (conf->max_degraded == 2 ||
++ (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
++ s->failed == 0)) {
++ /* Calculate the real rcw later - for now make it
++ * look like rcw is cheaper
++ */
++ rcw = 1; rmw = 2;
++ pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
++ conf->max_degraded, (unsigned long long)recovery_cp,
++ (unsigned long long)sh->sector);
++ } else for (i = disks; i--; ) {
++ /* would I have to read this buffer for read_modify_write */
++ struct r5dev *dev = &sh->dev[i];
++ if ((dev->towrite || i == sh->pd_idx) &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ if (test_bit(R5_Insync, &dev->flags))
++ rmw++;
++ else
++ rmw += 2*disks; /* cannot read it */
++ }
++ /* Would I have to read this buffer for reconstruct_write */
++ if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ if (test_bit(R5_Insync, &dev->flags))
++ rcw++;
++ else
++ rcw += 2*disks;
++ }
++ }
++ pr_debug("for sector %llu, rmw=%d rcw=%d\n",
++ (unsigned long long)sh->sector, rmw, rcw);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ if (rmw < rcw && rmw > 0) {
++ /* prefer read-modify-write, but need to get some data */
++ if (conf->mddev->queue)
++ blk_add_trace_msg(conf->mddev->queue,
++ "raid5 rmw %llu %d",
++ (unsigned long long)sh->sector, rmw);
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if ((dev->towrite || i == sh->pd_idx) &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags)) &&
++ test_bit(R5_Insync, &dev->flags)) {
++ if (test_bit(STRIPE_PREREAD_ACTIVE,
++ &sh->state)) {
++ pr_debug("Read_old block %d for r-m-w\n",
++ i);
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantread, &dev->flags);
++ s->locked++;
++ } else {
++ set_bit(STRIPE_DELAYED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ }
++ }
++ }
++ if (rcw <= rmw && rcw > 0) {
++ /* want reconstruct write, but need to get some data */
++ int qread = 0;
++ rcw = 0;
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
++ i != sh->pd_idx && i != sh->qd_idx &&
++ !test_bit(R5_LOCKED, &dev->flags) &&
++ !(test_bit(R5_UPTODATE, &dev->flags) ||
++ test_bit(R5_Wantcompute, &dev->flags))) {
++ rcw++;
++ if (test_bit(R5_Insync, &dev->flags) &&
++ test_bit(STRIPE_PREREAD_ACTIVE,
++ &sh->state)) {
++ pr_debug("Read_old block "
++ "%d for Reconstruct\n", i);
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantread, &dev->flags);
++ s->locked++;
++ qread++;
++ } else {
++ set_bit(STRIPE_DELAYED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ }
++ }
++ if (rcw && conf->mddev->queue)
++ blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
++ (unsigned long long)sh->sector,
++ rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
++ }
++
++ if (rcw > disks && rmw > disks &&
++ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ set_bit(STRIPE_DELAYED, &sh->state);
++
++ /* now if nothing is locked, and if we have enough data,
++ * we can start a write request
++ */
++ /* since handle_stripe can be called at any time we need to handle the
++ * case where a compute block operation has been submitted and then a
++ * subsequent call wants to start a write request. raid_run_ops only
++ * handles the case where compute block and reconstruct are requested
++ * simultaneously. If this is not the case then new writes need to be
++ * held off until the compute completes.
++ */
++ if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
++ (s->locked == 0 && (rcw == 0 || rmw == 0) &&
++ !test_bit(STRIPE_BIT_DELAY, &sh->state)))
++ schedule_reconstruction(sh, s, rcw == 0, 0);
++}
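++
++/* Illustrative cost comparison (assuming a 5-disk RAID5 where a single
++ * in-sync data block is fully overwritten): the rmw loop above counts
++ * the target block plus parity (2 reads) while the rcw loop counts the
++ * 3 untouched data blocks (3 reads), so read-modify-write wins; a
++ * near-full-stripe write tips the balance the other way.
++ */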
++
++static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s, int disks)
++{
++ struct r5dev *dev = NULL;
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ switch (sh->check_state) {
++ case check_state_idle:
++ /* start a new check operation if there are no failures */
++ if (s->failed == 0) {
++ BUG_ON(s->uptodate != disks);
++ sh->check_state = check_state_run;
++ set_bit(STRIPE_OP_CHECK, &s->ops_request);
++ clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
++ s->uptodate--;
++ break;
++ }
++ dev = &sh->dev[s->failed_num[0]];
++ /* fall through */
++ case check_state_compute_result:
++ sh->check_state = check_state_idle;
++ if (!dev)
++ dev = &sh->dev[sh->pd_idx];
++
++ /* check that a write has not made the stripe insync */
++ if (test_bit(STRIPE_INSYNC, &sh->state))
++ break;
++
++ /* either failed parity check, or recovery is happening */
++ BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
++ BUG_ON(s->uptodate != disks);
++
++ set_bit(R5_LOCKED, &dev->flags);
++ s->locked++;
++ set_bit(R5_Wantwrite, &dev->flags);
++
++ clear_bit(STRIPE_DEGRADED, &sh->state);
++ set_bit(STRIPE_INSYNC, &sh->state);
++ break;
++ case check_state_run:
++ break; /* we will be called again upon completion */
++ case check_state_check_result:
++ sh->check_state = check_state_idle;
++
++ /* if a failure occurred during the check operation, leave
++ * STRIPE_INSYNC not set and let the stripe be handled again
++ */
++ if (s->failed)
++ break;
++
++ /* handle a successful check operation, if parity is correct
++ * we are done. Otherwise update the mismatch count and repair
++ * parity if !MD_RECOVERY_CHECK
++ */
++ if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
++ /* parity is correct (on disc,
++ * not in buffer any more)
++ */
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
++ /* don't try to repair!! */
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ sh->check_state = check_state_compute_run;
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ set_bit(R5_Wantcompute,
++ &sh->dev[sh->pd_idx].flags);
++ sh->ops.target = sh->pd_idx;
++ sh->ops.target2 = -1;
++ s->uptodate++;
++ }
++ }
++ break;
++ case check_state_compute_run:
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
++ __func__, sh->check_state,
++ (unsigned long long) sh->sector);
++ BUG();
++ }
++}
++
++static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
++ struct stripe_head_state *s,
++ int disks)
++{
++ int pd_idx = sh->pd_idx;
++ int qd_idx = sh->qd_idx;
++ struct r5dev *dev;
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ BUG_ON(s->failed > 2);
++
++ /* Want to check and possibly repair P and Q.
++ * However there could be one 'failed' device, in which
++ * case we can only check one of them, possibly using the
++ * other to generate missing data
++ */
++
++ switch (sh->check_state) {
++ case check_state_idle:
++ /* start a new check operation if there are < 2 failures */
++ if (s->failed == s->q_failed) {
++ /* The only possible failed device holds Q, so it
++ * makes sense to check P (If anything else were failed,
++ * we would have used P to recreate it).
++ */
++ sh->check_state = check_state_run;
++ }
++ if (!s->q_failed && s->failed < 2) {
++ /* Q is not failed, and we didn't use it to generate
++ * anything, so it makes sense to check it
++ */
++ if (sh->check_state == check_state_run)
++ sh->check_state = check_state_run_pq;
++ else
++ sh->check_state = check_state_run_q;
++ }
++
++ /* discard potentially stale zero_sum_result */
++ sh->ops.zero_sum_result = 0;
++
++ if (sh->check_state == check_state_run) {
++ /* async_xor_zero_sum destroys the contents of P */
++ clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
++ s->uptodate--;
++ }
++ if (sh->check_state >= check_state_run &&
++ sh->check_state <= check_state_run_pq) {
++ /* async_syndrome_zero_sum preserves P and Q, so
++ * no need to mark them !uptodate here
++ */
++ set_bit(STRIPE_OP_CHECK, &s->ops_request);
++ break;
++ }
++
++ /* we have 2-disk failure */
++ BUG_ON(s->failed != 2);
++ /* fall through */
++ case check_state_compute_result:
++ sh->check_state = check_state_idle;
++
++ /* check that a write has not made the stripe insync */
++ if (test_bit(STRIPE_INSYNC, &sh->state))
++ break;
++
++ /* now write out any block on a failed drive,
++ * or P or Q if they were recomputed
++ */
++ BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
++ if (s->failed == 2) {
++ dev = &sh->dev[s->failed_num[1]];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ if (s->failed >= 1) {
++ dev = &sh->dev[s->failed_num[0]];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
++ dev = &sh->dev[pd_idx];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
++ dev = &sh->dev[qd_idx];
++ s->locked++;
++ set_bit(R5_LOCKED, &dev->flags);
++ set_bit(R5_Wantwrite, &dev->flags);
++ }
++ clear_bit(STRIPE_DEGRADED, &sh->state);
++
++ set_bit(STRIPE_INSYNC, &sh->state);
++ break;
++ case check_state_run:
++ case check_state_run_q:
++ case check_state_run_pq:
++ break; /* we will be called again upon completion */
++ case check_state_check_result:
++ sh->check_state = check_state_idle;
++
++ /* handle a successful check operation, if parity is correct
++ * we are done. Otherwise update the mismatch count and repair
++ * parity if !MD_RECOVERY_CHECK
++ */
++ if (sh->ops.zero_sum_result == 0) {
++ /* both parities are correct */
++ if (!s->failed)
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ /* in contrast to the raid5 case we can validate
++ * parity, but still have a failure to write
++ * back
++ */
++ sh->check_state = check_state_compute_result;
++ /* Returning at this point means that we may go
++ * off and bring p and/or q uptodate again so
++ * we make sure to check zero_sum_result again
++ * to verify if p or q need writeback
++ */
++ }
++ } else {
++ atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
++ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
++ /* don't try to repair!! */
++ set_bit(STRIPE_INSYNC, &sh->state);
++ else {
++ int *target = &sh->ops.target;
++
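++ /* 'target' starts out pointing at ops.target; if P needs a
++ * recompute it fills that slot first and 'target' is then
++ * redirected to ops.target2, so a combined P+Q failure
++ * populates both compute targets.
++ */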
++ sh->ops.target = -1;
++ sh->ops.target2 = -1;
++ sh->check_state = check_state_compute_run;
++ set_bit(STRIPE_COMPUTE_RUN, &sh->state);
++ set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
++ if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
++ set_bit(R5_Wantcompute,
++ &sh->dev[pd_idx].flags);
++ *target = pd_idx;
++ target = &sh->ops.target2;
++ s->uptodate++;
++ }
++ if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
++ set_bit(R5_Wantcompute,
++ &sh->dev[qd_idx].flags);
++ *target = qd_idx;
++ s->uptodate++;
++ }
++ }
++ }
++ break;
++ case check_state_compute_run:
++ break;
++ default:
++ printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
++ __func__, sh->check_state,
++ (unsigned long long) sh->sector);
++ BUG();
++ }
++}
++
++static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
++{
++ int i;
++
++ /* We have read all the blocks in this stripe and now we need to
++ * copy some of them into a target stripe for expand.
++ */
++ struct dma_async_tx_descriptor *tx = NULL;
++ clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
++ for (i = 0; i < sh->disks; i++)
++ if (i != sh->pd_idx && i != sh->qd_idx) {
++ int dd_idx, j;
++ struct stripe_head *sh2;
++ struct async_submit_ctl submit;
++
++ sector_t bn = compute_blocknr(sh, i, 1);
++ sector_t s = raid5_compute_sector(conf, bn, 0,
++ &dd_idx, NULL);
++ sh2 = get_active_stripe(conf, s, 0, 1, 1);
++ if (sh2 == NULL)
++ /* so far only the early blocks of this stripe
++ * have been requested. When later blocks
++ * get requested, we will try again
++ */
++ continue;
++ if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
++ test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
++ /* must have already done this block */
++ release_stripe(sh2);
++ continue;
++ }
++
++ /* place all the copies on one channel */
++ init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
++ tx = async_memcpy(sh2->dev[dd_idx].page,
++ sh->dev[i].page, 0, 0, STRIPE_SIZE,
++ &submit);
++
++ set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
++ set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
++ for (j = 0; j < conf->raid_disks; j++)
++ if (j != sh2->pd_idx &&
++ j != sh2->qd_idx &&
++ !test_bit(R5_Expanded, &sh2->dev[j].flags))
++ break;
++ if (j == conf->raid_disks) {
++ set_bit(STRIPE_EXPAND_READY, &sh2->state);
++ set_bit(STRIPE_HANDLE, &sh2->state);
++ }
++ release_stripe(sh2);
++
++ }
++ /* done submitting copies, wait for them to complete */
++ async_tx_quiesce(&tx);
++}
++
++/*
++ * handle_stripe - do things to a stripe.
++ *
++ * We lock the stripe by setting STRIPE_ACTIVE and then examine the
++ * state of various bits to see what needs to be done.
++ * Possible results:
++ * return some read requests which now have data
++ * return some write requests which are safely on storage
++ * schedule a read on some buffers
++ * schedule a write of some buffers
++ * return confirmation of parity correctness
++ *
++ */
++
++static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
++{
++ struct r5conf *conf = sh->raid_conf;
++ int disks = sh->disks;
++ struct r5dev *dev;
++ int i;
++ int do_recovery = 0;
++
++ memset(s, 0, sizeof(*s));
++
++ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
++ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
++ s->failed_num[0] = -1;
++ s->failed_num[1] = -1;
++
++ /* Now to look around and see what can be done */
++ rcu_read_lock();
++ for (i = disks; i--; ) {
++ struct md_rdev *rdev;
++ sector_t first_bad;
++ int bad_sectors;
++ int is_bad = 0;
++
++ dev = &sh->dev[i];
++
++ pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
++ i, dev->flags,
++ dev->toread, dev->towrite, dev->written);
++ /* maybe we can reply to a read
++ *
++ * new wantfill requests are only permitted while
++ * ops_complete_biofill is guaranteed to be inactive
++ */
++ if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
++ !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
++ set_bit(R5_Wantfill, &dev->flags);
++
++ /* now count some things */
++ if (test_bit(R5_LOCKED, &dev->flags))
++ s->locked++;
++ if (test_bit(R5_UPTODATE, &dev->flags))
++ s->uptodate++;
++ if (test_bit(R5_Wantcompute, &dev->flags)) {
++ s->compute++;
++ BUG_ON(s->compute > 2);
++ }
++
++ if (test_bit(R5_Wantfill, &dev->flags))
++ s->to_fill++;
++ else if (dev->toread)
++ s->to_read++;
++ if (dev->towrite) {
++ s->to_write++;
++ if (!test_bit(R5_OVERWRITE, &dev->flags))
++ s->non_overwrite++;
++ }
++ if (dev->written)
++ s->written++;
++ /* Prefer to use the replacement for reads, but only
++ * if it is recovered enough and has no bad blocks.
++ */
++ rdev = rcu_dereference(conf->disks[i].replacement);
++ if (rdev && !test_bit(Faulty, &rdev->flags) &&
++ rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
++ !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
++ &first_bad, &bad_sectors))
++ set_bit(R5_ReadRepl, &dev->flags);
++ else {
++ if (rdev)
++ set_bit(R5_NeedReplace, &dev->flags);
++ rdev = rcu_dereference(conf->disks[i].rdev);
++ clear_bit(R5_ReadRepl, &dev->flags);
++ }
++ if (rdev && test_bit(Faulty, &rdev->flags))
++ rdev = NULL;
++ if (rdev) {
++ is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
++ &first_bad, &bad_sectors);
++ if (s->blocked_rdev == NULL
++ && (test_bit(Blocked, &rdev->flags)
++ || is_bad < 0)) {
++ if (is_bad < 0)
++ set_bit(BlockedBadBlocks,
++ &rdev->flags);
++ s->blocked_rdev = rdev;
++ atomic_inc(&rdev->nr_pending);
++ }
++ }
++ clear_bit(R5_Insync, &dev->flags);
++ if (!rdev)
++ /* Not in-sync */;
++ else if (is_bad) {
++ /* also not in-sync */
++ if (!test_bit(WriteErrorSeen, &rdev->flags) &&
++ test_bit(R5_UPTODATE, &dev->flags)) {
++ /* treat as in-sync, but with a read error
++ * which we can now try to correct
++ */
++ set_bit(R5_Insync, &dev->flags);
++ set_bit(R5_ReadError, &dev->flags);
++ }
++ } else if (test_bit(In_sync, &rdev->flags))
++ set_bit(R5_Insync, &dev->flags);
++ else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
++ /* in sync if before recovery_offset */
++ set_bit(R5_Insync, &dev->flags);
++ else if (test_bit(R5_UPTODATE, &dev->flags) &&
++ test_bit(R5_Expanded, &dev->flags))
++ /* If we've reshaped into here, we assume it is Insync.
++ * We will shortly update recovery_offset to make
++ * it official.
++ */
++ set_bit(R5_Insync, &dev->flags);
++
++ if (test_bit(R5_WriteError, &dev->flags)) {
++ /* This flag does not apply to '.replacement',
++ * only to .rdev, so make sure to check that. */
++ struct md_rdev *rdev2 = rcu_dereference(
++ conf->disks[i].rdev);
++ if (rdev2 == rdev)
++ clear_bit(R5_Insync, &dev->flags);
++ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
++ s->handle_bad_blocks = 1;
++ atomic_inc(&rdev2->nr_pending);
++ } else
++ clear_bit(R5_WriteError, &dev->flags);
++ }
++ if (test_bit(R5_MadeGood, &dev->flags)) {
++ /* This flag does not apply to '.replacement',
++ * only to .rdev, so make sure to check that. */
++ struct md_rdev *rdev2 = rcu_dereference(
++ conf->disks[i].rdev);
++ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
++ s->handle_bad_blocks = 1;
++ atomic_inc(&rdev2->nr_pending);
++ } else
++ clear_bit(R5_MadeGood, &dev->flags);
++ }
++ if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
++ struct md_rdev *rdev2 = rcu_dereference(
++ conf->disks[i].replacement);
++ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
++ s->handle_bad_blocks = 1;
++ atomic_inc(&rdev2->nr_pending);
++ } else
++ clear_bit(R5_MadeGoodRepl, &dev->flags);
++ }
++ if (!test_bit(R5_Insync, &dev->flags)) {
++ /* The ReadError flag will just be confusing now */
++ clear_bit(R5_ReadError, &dev->flags);
++ clear_bit(R5_ReWrite, &dev->flags);
++ }
++ if (test_bit(R5_ReadError, &dev->flags))
++ clear_bit(R5_Insync, &dev->flags);
++ if (!test_bit(R5_Insync, &dev->flags)) {
++ if (s->failed < 2)
++ s->failed_num[s->failed] = i;
++ s->failed++;
++ if (rdev && !test_bit(Faulty, &rdev->flags))
++ do_recovery = 1;
++ }
++ }
++ if (test_bit(STRIPE_SYNCING, &sh->state)) {
++ /* If there is a failed device being replaced,
++ * we must be recovering.
++ * else if we are after recovery_cp, we must be syncing
++ * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
++ * else we can only be replacing
++ * sync and recovery both need to read all devices, and so
++ * use the same flag.
++ */
++ if (do_recovery ||
++ sh->sector >= conf->mddev->recovery_cp ||
++ test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
++ s->syncing = 1;
++ else
++ s->replacing = 1;
++ }
++ rcu_read_unlock();
++}
++
++static void handle_stripe(struct stripe_head *sh)
++{
++ struct stripe_head_state s;
++ struct r5conf *conf = sh->raid_conf;
++ int i;
++ int prexor;
++ int disks = sh->disks;
++ struct r5dev *pdev, *qdev;
++
++ clear_bit(STRIPE_HANDLE, &sh->state);
++ if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
++ /* already being handled, ensure it gets handled
++ * again when the current action finishes */
++ set_bit(STRIPE_HANDLE, &sh->state);
++ return;
++ }
++
++ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
++ spin_lock(&sh->stripe_lock);
++ /* Cannot process 'sync' concurrently with 'discard' */
++ if (!test_bit(STRIPE_DISCARD, &sh->state) &&
++ test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
++ set_bit(STRIPE_SYNCING, &sh->state);
++ clear_bit(STRIPE_INSYNC, &sh->state);
++ clear_bit(STRIPE_REPLACED, &sh->state);
++ }
++ spin_unlock(&sh->stripe_lock);
++ }
++ clear_bit(STRIPE_DELAYED, &sh->state);
++
++ pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
++ "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
++ (unsigned long long)sh->sector, sh->state,
++ atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
++ sh->check_state, sh->reconstruct_state);
++
++ analyse_stripe(sh, &s);
++
++ if (s.handle_bad_blocks) {
++ set_bit(STRIPE_HANDLE, &sh->state);
++ goto finish;
++ }
++
++ if (unlikely(s.blocked_rdev)) {
++ if (s.syncing || s.expanding || s.expanded ||
++ s.replacing || s.to_write || s.written) {
++ set_bit(STRIPE_HANDLE, &sh->state);
++ goto finish;
++ }
++ /* There is nothing for the blocked_rdev to block */
++ rdev_dec_pending(s.blocked_rdev, conf->mddev);
++ s.blocked_rdev = NULL;
++ }
++
++ if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
++ set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
++ set_bit(STRIPE_BIOFILL_RUN, &sh->state);
++ }
++
++ pr_debug("locked=%d uptodate=%d to_read=%d"
++ " to_write=%d failed=%d failed_num=%d,%d\n",
++ s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
++ s.failed_num[0], s.failed_num[1]);
++ /* check if the array has lost more than max_degraded devices;
++ * if so, some requests might need to be failed.
++ */
++ if (s.failed > conf->max_degraded) {
++ sh->check_state = 0;
++ sh->reconstruct_state = 0;
++ if (s.to_read+s.to_write+s.written)
++ handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
++ if (s.syncing + s.replacing)
++ handle_failed_sync(conf, sh, &s);
++ }
++
++ /* Now we check to see if any write operations have recently
++ * completed
++ */
++ prexor = 0;
++ if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
++ prexor = 1;
++ if (sh->reconstruct_state == reconstruct_state_drain_result ||
++ sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
++ sh->reconstruct_state = reconstruct_state_idle;
++
++ /* All the 'written' buffers and the parity block are ready to
++ * be written back to disk
++ */
++ BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
++ !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
++ BUG_ON(sh->qd_idx >= 0 &&
++ !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
++ !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
++ for (i = disks; i--; ) {
++ struct r5dev *dev = &sh->dev[i];
++ if (test_bit(R5_LOCKED, &dev->flags) &&
++ (i == sh->pd_idx || i == sh->qd_idx ||
++ dev->written)) {
++ pr_debug("Writing block %d\n", i);
++ set_bit(R5_Wantwrite, &dev->flags);
++ if (prexor)
++ continue;
++ if (s.failed > 1)
++ continue;
++ if (!test_bit(R5_Insync, &dev->flags) ||
++ ((i == sh->pd_idx || i == sh->qd_idx) &&
++ s.failed == 0))
++ set_bit(STRIPE_INSYNC, &sh->state);
++ }
++ }
++ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ s.dec_preread_active = 1;
++ }
++
++ /*
++ * might be able to return some write requests if the parity blocks
++ * are safe, or on a failed drive
++ */
++ pdev = &sh->dev[sh->pd_idx];
++ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
++ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
++ qdev = &sh->dev[sh->qd_idx];
++ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
++ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
++ || conf->level < 6;
++
++ if (s.written &&
++ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
++ && !test_bit(R5_LOCKED, &pdev->flags)
++ && (test_bit(R5_UPTODATE, &pdev->flags) ||
++ test_bit(R5_Discard, &pdev->flags))))) &&
++ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
++ && !test_bit(R5_LOCKED, &qdev->flags)
++ && (test_bit(R5_UPTODATE, &qdev->flags) ||
++ test_bit(R5_Discard, &qdev->flags))))))
++ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
++
++ /* Now we might consider reading some blocks, either to check/generate
++ * parity, or to satisfy requests
++ * or to load a block that is being partially written.
++ */
++ if (s.to_read || s.non_overwrite
++ || (conf->level == 6 && s.to_write && s.failed)
++ || (s.syncing && (s.uptodate + s.compute < disks))
++ || s.replacing
++ || s.expanding)
++ handle_stripe_fill(sh, &s, disks);
++
++ /* Now to consider new write requests, and what else, if anything,
++ * should be read. We do not handle new writes when:
++ * 1/ A 'write' operation (copy+xor) is already in flight.
++ * 2/ A 'check' operation is in flight, as it may clobber the parity
++ * block.
++ */
++ if (s.to_write && !sh->reconstruct_state && !sh->check_state)
++ handle_stripe_dirtying(conf, sh, &s, disks);
++
++ /* maybe we need to check and possibly fix the parity for this stripe
++ * Any reads will already have been scheduled, so we just see if enough
++ * data is available. The parity check is held off while parity
++ * dependent operations are in flight.
++ */
++ if (sh->check_state ||
++ (s.syncing && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
++ !test_bit(STRIPE_INSYNC, &sh->state))) {
++ if (conf->level == 6)
++ handle_parity_checks6(conf, sh, &s, disks);
++ else
++ handle_parity_checks5(conf, sh, &s, disks);
++ }
++
++ if ((s.replacing || s.syncing) && s.locked == 0
++ && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
++ && !test_bit(STRIPE_REPLACED, &sh->state)) {
++ /* Write out to replacement devices where possible */
++ for (i = 0; i < conf->raid_disks; i++)
++ if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
++ WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
++ set_bit(R5_WantReplace, &sh->dev[i].flags);
++ set_bit(R5_LOCKED, &sh->dev[i].flags);
++ s.locked++;
++ }
++ if (s.replacing)
++ set_bit(STRIPE_INSYNC, &sh->state);
++ set_bit(STRIPE_REPLACED, &sh->state);
++ }
++ if ((s.syncing || s.replacing) && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
++ test_bit(STRIPE_INSYNC, &sh->state)) {
++ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
++ clear_bit(STRIPE_SYNCING, &sh->state);
++ if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
++ wake_up(&conf->wait_for_overlap);
++ }
++
++ /* If the failed drives are just a ReadError, then we might need
++ * to progress the repair/check process
++ */
++ if (s.failed <= conf->max_degraded && !conf->mddev->ro)
++ for (i = 0; i < s.failed; i++) {
++ struct r5dev *dev = &sh->dev[s.failed_num[i]];
++ if (test_bit(R5_ReadError, &dev->flags)
++ && !test_bit(R5_LOCKED, &dev->flags)
++ && test_bit(R5_UPTODATE, &dev->flags)
++ ) {
++ if (!test_bit(R5_ReWrite, &dev->flags)) {
++ set_bit(R5_Wantwrite, &dev->flags);
++ set_bit(R5_ReWrite, &dev->flags);
++ set_bit(R5_LOCKED, &dev->flags);
++ s.locked++;
++ } else {
++ /* let's read it back */
++ set_bit(R5_Wantread, &dev->flags);
++ set_bit(R5_LOCKED, &dev->flags);
++ s.locked++;
++ }
++ }
++ }
++
++ /* Finish reconstruct operations initiated by the expansion process */
++ if (sh->reconstruct_state == reconstruct_state_result) {
++ struct stripe_head *sh_src
++ = get_active_stripe(conf, sh->sector, 1, 1, 1);
++ if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
++ /* sh cannot be written until sh_src has been read.
++ * so arrange for sh to be delayed a little
++ */
++ set_bit(STRIPE_DELAYED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
++ &sh_src->state))
++ atomic_inc(&conf->preread_active_stripes);
++ release_stripe(sh_src);
++ goto finish;
++ }
++ if (sh_src)
++ release_stripe(sh_src);
++
++ sh->reconstruct_state = reconstruct_state_idle;
++ clear_bit(STRIPE_EXPANDING, &sh->state);
++ for (i = conf->raid_disks; i--; ) {
++ set_bit(R5_Wantwrite, &sh->dev[i].flags);
++ set_bit(R5_LOCKED, &sh->dev[i].flags);
++ s.locked++;
++ }
++ }
++
++ if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
++ !sh->reconstruct_state) {
++ /* Need to write out all blocks after computing parity */
++ sh->disks = conf->raid_disks;
++ stripe_set_idx(sh->sector, conf, 0, sh);
++ schedule_reconstruction(sh, &s, 1, 1);
++ } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
++ clear_bit(STRIPE_EXPAND_READY, &sh->state);
++ atomic_dec(&conf->reshape_stripes);
++ wake_up(&conf->wait_for_overlap);
++ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
++ }
++
++ if (s.expanding && s.locked == 0 &&
++ !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
++ handle_stripe_expansion(conf, sh);
++
++finish:
++ /* wait for this device to become unblocked */
++ if (unlikely(s.blocked_rdev)) {
++ if (conf->mddev->external)
++ md_wait_for_blocked_rdev(s.blocked_rdev,
++ conf->mddev);
++ else
++ /* Internal metadata will immediately
++ * be written by raid5d, so we don't
++ * need to wait here.
++ */
++ rdev_dec_pending(s.blocked_rdev,
++ conf->mddev);
++ }
++
++ if (s.handle_bad_blocks)
++ for (i = disks; i--; ) {
++ struct md_rdev *rdev;
++ struct r5dev *dev = &sh->dev[i];
++ if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
++ /* We own a safe reference to the rdev */
++ rdev = conf->disks[i].rdev;
++ if (!rdev_set_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0))
++ md_error(conf->mddev, rdev);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
++ rdev = conf->disks[i].rdev;
++ rdev_clear_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
++ rdev = conf->disks[i].replacement;
++ if (!rdev)
++ /* rdev has been moved down */
++ rdev = conf->disks[i].rdev;
++ rdev_clear_badblocks(rdev, sh->sector,
++ STRIPE_SECTORS, 0);
++ rdev_dec_pending(rdev, conf->mddev);
++ }
++ }
++
++ if (s.ops_request)
++ raid_run_ops(sh, s.ops_request);
++
++ ops_run_io(sh, &s);
++
++ if (s.dec_preread_active) {
++ /* We delay this until after ops_run_io so that if make_request
++ * is waiting on a flush, it won't continue until the writes
++ * have actually been submitted.
++ */
++ atomic_dec(&conf->preread_active_stripes);
++ if (atomic_read(&conf->preread_active_stripes) <
++ IO_THRESHOLD)
++ md_wakeup_thread(conf->mddev->thread);
++ }
++
++ return_io(s.return_bi);
++
++ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
++}
++
++static void raid5_activate_delayed(struct r5conf *conf)
++{
++ if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
++ while (!list_empty(&conf->delayed_list)) {
++ struct list_head *l = conf->delayed_list.next;
++ struct stripe_head *sh;
++ sh = list_entry(l, struct stripe_head, lru);
++ list_del_init(l);
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ atomic_inc(&conf->preread_active_stripes);
++ list_add_tail(&sh->lru, &conf->hold_list);
++ raid5_wakeup_stripe_thread(sh);
++ }
++ }
++}
++
++static void activate_bit_delay(struct r5conf *conf,
++ struct list_head *temp_inactive_list)
++{
++ /* device_lock is held */
++ struct list_head head;
++ list_add(&head, &conf->bitmap_list);
++ list_del_init(&conf->bitmap_list);
++ while (!list_empty(&head)) {
++ struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
++ int hash;
++ list_del_init(&sh->lru);
++ atomic_inc(&sh->count);
++ hash = sh->hash_lock_index;
++ __release_stripe(conf, sh, &temp_inactive_list[hash]);
++ }
++}
++
++int md_raid5_congested(struct mddev *mddev, int bits)
++{
++ struct r5conf *conf = mddev->private;
++
++ /* No difference between reads and writes. Just check
++ * how busy the stripe_cache is
++ */
++
++ if (conf->inactive_blocked)
++ return 1;
++ if (conf->quiesce)
++ return 1;
++ if (atomic_read(&conf->empty_inactive_list_nr))
++ return 1;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(md_raid5_congested);
++
++static int raid5_congested(void *data, int bits)
++{
++ struct mddev *mddev = data;
++
++ return mddev_congested(mddev, bits) ||
++ md_raid5_congested(mddev, bits);
++}
++
++/* We want read requests to align with chunks where possible,
++ * but write requests don't need to.
++ */
++static int raid5_mergeable_bvec(struct request_queue *q,
++ struct bvec_merge_data *bvm,
++ struct bio_vec *biovec)
++{
++ struct mddev *mddev = q->queuedata;
++ sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
++ int max;
++ unsigned int chunk_sectors = mddev->chunk_sectors;
++ unsigned int bio_sectors = bvm->bi_size >> 9;
++
++ if ((bvm->bi_rw & 1) == WRITE)
++ return biovec->bv_len; /* always allow writes to be mergeable */
++
++ if (mddev->new_chunk_sectors < mddev->chunk_sectors)
++ chunk_sectors = mddev->new_chunk_sectors;
++ max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
++ if (max < 0) max = 0;
++ if (max <= biovec->bv_len && bio_sectors == 0)
++ return biovec->bv_len;
++ else
++ return max;
++}
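++
++/* Worked example (illustrative values): with chunk_sectors = 128, a read
++ * at offset 120 within its chunk, and 4 sectors already queued, max =
++ * (128 - (120 + 4)) << 9 = 2048 bytes, i.e. the bvec may grow only up
++ * to the chunk boundary.
++ */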
++
++static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
++{
++ sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
++ unsigned int chunk_sectors = mddev->chunk_sectors;
++ unsigned int bio_sectors = bio_sectors(bio);
++
++ if (mddev->new_chunk_sectors < mddev->chunk_sectors)
++ chunk_sectors = mddev->new_chunk_sectors;
++ return chunk_sectors >=
++ ((sector & (chunk_sectors - 1)) + bio_sectors);
++}
++
++/*
++ * add bio to the retry LIFO (in O(1) ... we are in interrupt context),
++ * later sampled by raid5d.
++ */
++static void add_bio_to_retry(struct bio *bi, struct r5conf *conf)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&conf->device_lock, flags);
++
++ bi->bi_next = conf->retry_read_aligned_list;
++ conf->retry_read_aligned_list = bi;
++
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ md_wakeup_thread(conf->mddev->thread);
++}
++
++static struct bio *remove_bio_from_retry(struct r5conf *conf)
++{
++ struct bio *bi;
++
++ bi = conf->retry_read_aligned;
++ if (bi) {
++ conf->retry_read_aligned = NULL;
++ return bi;
++ }
++ bi = conf->retry_read_aligned_list;
++ if (bi) {
++ conf->retry_read_aligned_list = bi->bi_next;
++ bi->bi_next = NULL;
++ /*
++ * this sets the active stripe count to 1 and the processed
++ * stripe count to zero (upper 8 bits)
++ */
++ raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
++ }
++
++ return bi;
++}
++
++/*
++ * The "raid5_align_endio" should check if the read succeeded and if it
++ * did, call bio_endio on the original bio (having bio_put the new bio
++ * first).
++ * If the read failed, the bio is queued for retry via add_bio_to_retry().
++ */
++static void raid5_align_endio(struct bio *bi, int error)
++{
++ struct bio* raid_bi = bi->bi_private;
++ struct mddev *mddev;
++ struct r5conf *conf;
++ int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
++ struct md_rdev *rdev;
++
++ bio_put(bi);
++
++ rdev = (void*)raid_bi->bi_next;
++ raid_bi->bi_next = NULL;
++ mddev = rdev->mddev;
++ conf = mddev->private;
++
++ rdev_dec_pending(rdev, conf->mddev);
++
++ if (!error && uptodate) {
++ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
++ raid_bi, 0);
++ bio_endio(raid_bi, 0);
++ if (atomic_dec_and_test(&conf->active_aligned_reads))
++ wake_up(&conf->wait_for_stripe);
++ return;
++ }
++
++ pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
++
++ add_bio_to_retry(raid_bi, conf);
++}
++
++static int bio_fits_rdev(struct bio *bi)
++{
++ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
++
++ if (bio_sectors(bi) > queue_max_sectors(q))
++ return 0;
++ blk_recount_segments(q, bi);
++ if (bi->bi_phys_segments > queue_max_segments(q))
++ return 0;
++
++ if (q->merge_bvec_fn)
++ /* it's too hard to apply the merge_bvec_fn at this stage,
++ * just give up
++ */
++ return 0;
++
++ return 1;
++}
++
++static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
++{
++ struct r5conf *conf = mddev->private;
++ int dd_idx;
++ struct bio* align_bi;
++ struct md_rdev *rdev;
++ sector_t end_sector;
++
++ if (!in_chunk_boundary(mddev, raid_bio)) {
++ pr_debug("chunk_aligned_read : non aligned\n");
++ return 0;
++ }
++ /*
++ * use bio_clone_mddev to make a copy of the bio
++ */
++ align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
++ if (!align_bi)
++ return 0;
++ /*
++ * set bi_end_io to a new function, and set bi_private to the
++ * original bio.
++ */
++ align_bi->bi_end_io = raid5_align_endio;
++ align_bi->bi_private = raid_bio;
++ /*
++ * compute position
++ */
++ align_bi->bi_iter.bi_sector =
++ raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
++ 0, &dd_idx, NULL);
++
++ end_sector = bio_end_sector(align_bi);
++ rcu_read_lock();
++ rdev = rcu_dereference(conf->disks[dd_idx].replacement);
++ if (!rdev || test_bit(Faulty, &rdev->flags) ||
++ rdev->recovery_offset < end_sector) {
++ rdev = rcu_dereference(conf->disks[dd_idx].rdev);
++ if (rdev &&
++ (test_bit(Faulty, &rdev->flags) ||
++ !(test_bit(In_sync, &rdev->flags) ||
++ rdev->recovery_offset >= end_sector)))
++ rdev = NULL;
++ }
++ if (rdev) {
++ sector_t first_bad;
++ int bad_sectors;
++
++ atomic_inc(&rdev->nr_pending);
++ rcu_read_unlock();
++ raid_bio->bi_next = (void*)rdev;
++ align_bi->bi_bdev = rdev->bdev;
++ __clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
++
++ if (!bio_fits_rdev(align_bi) ||
++ is_badblock(rdev, align_bi->bi_iter.bi_sector,
++ bio_sectors(align_bi),
++ &first_bad, &bad_sectors)) {
++ /* too big in some way, or has a known bad block */
++ bio_put(align_bi);
++ rdev_dec_pending(rdev, mddev);
++ return 0;
++ }
++
++ /* No reshape active, so we can trust rdev->data_offset */
++ align_bi->bi_iter.bi_sector += rdev->data_offset;
++
++ spin_lock_irq(&conf->device_lock);
++ wait_event_lock_irq(conf->wait_for_stripe,
++ conf->quiesce == 0,
++ conf->device_lock);
++ atomic_inc(&conf->active_aligned_reads);
++ spin_unlock_irq(&conf->device_lock);
++
++ if (mddev->gendisk)
++ trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
++ align_bi, disk_devt(mddev->gendisk),
++ raid_bio->bi_iter.bi_sector);
++ generic_make_request(align_bi);
++ return 1;
++ } else {
++ rcu_read_unlock();
++ bio_put(align_bi);
++ return 0;
++ }
++}
++
++/* __get_priority_stripe - get the next stripe to process
++ *
++ * Full stripe writes are allowed to pass preread active stripes up until
++ * the bypass_threshold is exceeded. In general the bypass_count
++ * increments when the handle_list is handled before the hold_list; however, it
++ * will not be incremented when STRIPE_IO_STARTED is sampled set, signifying a
++ * stripe with in-flight i/o. The bypass_count will be reset when the
++ * head of the hold_list has changed, i.e. the head was promoted to the
++ * handle_list.
++ */
++static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
++{
++ struct stripe_head *sh = NULL, *tmp;
++ struct list_head *handle_list = NULL;
++ struct r5worker_group *wg = NULL;
++
++ if (conf->worker_cnt_per_group == 0) {
++ handle_list = &conf->handle_list;
++ } else if (group != ANY_GROUP) {
++ handle_list = &conf->worker_groups[group].handle_list;
++ wg = &conf->worker_groups[group];
++ } else {
++ int i;
++ for (i = 0; i < conf->group_cnt; i++) {
++ handle_list = &conf->worker_groups[i].handle_list;
++ wg = &conf->worker_groups[i];
++ if (!list_empty(handle_list))
++ break;
++ }
++ }
++
++ pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
++ __func__,
++ list_empty(handle_list) ? "empty" : "busy",
++ list_empty(&conf->hold_list) ? "empty" : "busy",
++ atomic_read(&conf->pending_full_writes), conf->bypass_count);
++
++ if (!list_empty(handle_list)) {
++ sh = list_entry(handle_list->next, typeof(*sh), lru);
++
++ if (list_empty(&conf->hold_list))
++ conf->bypass_count = 0;
++ else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
++ if (conf->hold_list.next == conf->last_hold)
++ conf->bypass_count++;
++ else {
++ conf->last_hold = conf->hold_list.next;
++ conf->bypass_count -= conf->bypass_threshold;
++ if (conf->bypass_count < 0)
++ conf->bypass_count = 0;
++ }
++ }
++ } else if (!list_empty(&conf->hold_list) &&
++ ((conf->bypass_threshold &&
++ conf->bypass_count > conf->bypass_threshold) ||
++ atomic_read(&conf->pending_full_writes) == 0)) {
++
++ list_for_each_entry(tmp, &conf->hold_list, lru) {
++ if (conf->worker_cnt_per_group == 0 ||
++ group == ANY_GROUP ||
++ !cpu_online(tmp->cpu) ||
++ cpu_to_group(tmp->cpu) == group) {
++ sh = tmp;
++ break;
++ }
++ }
++
++ if (sh) {
++ conf->bypass_count -= conf->bypass_threshold;
++ if (conf->bypass_count < 0)
++ conf->bypass_count = 0;
++ }
++ wg = NULL;
++ }
++
++ if (!sh)
++ return NULL;
++
++ if (wg) {
++ wg->stripes_cnt--;
++ sh->group = NULL;
++ }
++ list_del_init(&sh->lru);
++ BUG_ON(atomic_inc_return(&sh->count) != 1);
++ return sh;
++}
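++
++/* Illustrative walk-through (assuming bypass_threshold = 8): while the
++ * hold_list head stays put, each pass that takes a not-yet-started
++ * stripe from the handle_list bumps bypass_count; once the count
++ * exceeds 8 (or no full-stripe writes are pending at all), a held
++ * stripe is promoted and bypass_count drops by the threshold.
++ */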
++
++struct raid5_plug_cb {
++ struct blk_plug_cb cb;
++ struct list_head list;
++ struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS];
++};
++
++static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
++{
++ struct raid5_plug_cb *cb = container_of(
++ blk_cb, struct raid5_plug_cb, cb);
++ struct stripe_head *sh;
++ struct mddev *mddev = cb->cb.data;
++ struct r5conf *conf = mddev->private;
++ int cnt = 0;
++ int hash;
++
++ if (cb->list.next && !list_empty(&cb->list)) {
++ spin_lock_irq(&conf->device_lock);
++ while (!list_empty(&cb->list)) {
++ sh = list_first_entry(&cb->list, struct stripe_head, lru);
++ list_del_init(&sh->lru);
++ /*
++ * avoid a race where release_stripe_plug() sees
++ * STRIPE_ON_UNPLUG_LIST clear but the stripe
++ * is still in our list
++ */
++ smp_mb__before_atomic();
++ clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
++ /*
++ * STRIPE_ON_RELEASE_LIST could be set here. In that
++ * case, the count is always > 1 here
++ */
++ hash = sh->hash_lock_index;
++ __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
++ cnt++;
++ }
++ spin_unlock_irq(&conf->device_lock);
++ }
++ release_inactive_stripe_list(conf, cb->temp_inactive_list,
++ NR_STRIPE_HASH_LOCKS);
++ if (mddev->queue)
++ trace_block_unplug(mddev->queue, cnt, !from_schedule);
++ kfree(cb);
++}
++
++static void release_stripe_plug(struct mddev *mddev,
++ struct stripe_head *sh)
++{
++ struct blk_plug_cb *blk_cb = blk_check_plugged(
++ raid5_unplug, mddev,
++ sizeof(struct raid5_plug_cb));
++ struct raid5_plug_cb *cb;
++
++ if (!blk_cb) {
++ release_stripe(sh);
++ return;
++ }
++
++ cb = container_of(blk_cb, struct raid5_plug_cb, cb);
++
++ if (cb->list.next == NULL) {
++ int i;
++ INIT_LIST_HEAD(&cb->list);
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ INIT_LIST_HEAD(cb->temp_inactive_list + i);
++ }
++
++ if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
++ list_add_tail(&sh->lru, &cb->list);
++ else
++ release_stripe(sh);
++}
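++
++/* Design note: blk_check_plugged() returns the current task's plug
++ * callback, allocating a zeroed one on first use, which is why the
++ * cb->list.next == NULL test above serves as the "freshly allocated"
++ * check before the lists are initialised.
++ */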
++
++static void make_discard_request(struct mddev *mddev, struct bio *bi)
++{
++ struct r5conf *conf = mddev->private;
++ sector_t logical_sector, last_sector;
++ struct stripe_head *sh;
++ int remaining;
++ int stripe_sectors;
++
++ if (mddev->reshape_position != MaxSector)
++ /* Skip discard while reshape is happening */
++ return;
++
++ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
++ last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
++
++ bi->bi_next = NULL;
++ bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
++
++ stripe_sectors = conf->chunk_sectors *
++ (conf->raid_disks - conf->max_degraded);
++ logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
++ stripe_sectors);
++ sector_div(last_sector, stripe_sectors);
++
++ logical_sector *= conf->chunk_sectors;
++ last_sector *= conf->chunk_sectors;
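++ /* e.g. (illustrative): chunk_sectors = 128 on a 4-disk RAID5 gives
++ * stripe_sectors = 384; a discard of logical sectors [500, 1500) is
++ * trimmed to the one fully-covered stripe, i.e. stripe-head sectors
++ * [256, 384) after the scaling above.
++ */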
++
++ for (; logical_sector < last_sector;
++ logical_sector += STRIPE_SECTORS) {
++ DEFINE_WAIT(w);
++ int d;
++ again:
++ sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
++ prepare_to_wait(&conf->wait_for_overlap, &w,
++ TASK_UNINTERRUPTIBLE);
++ set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
++ if (test_bit(STRIPE_SYNCING, &sh->state)) {
++ release_stripe(sh);
++ schedule();
++ goto again;
++ }
++ clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
++ spin_lock_irq(&sh->stripe_lock);
++ for (d = 0; d < conf->raid_disks; d++) {
++ if (d == sh->pd_idx || d == sh->qd_idx)
++ continue;
++ if (sh->dev[d].towrite || sh->dev[d].toread) {
++ set_bit(R5_Overlap, &sh->dev[d].flags);
++ spin_unlock_irq(&sh->stripe_lock);
++ release_stripe(sh);
++ schedule();
++ goto again;
++ }
++ }
++ set_bit(STRIPE_DISCARD, &sh->state);
++ finish_wait(&conf->wait_for_overlap, &w);
++ for (d = 0; d < conf->raid_disks; d++) {
++ if (d == sh->pd_idx || d == sh->qd_idx)
++ continue;
++ sh->dev[d].towrite = bi;
++ set_bit(R5_OVERWRITE, &sh->dev[d].flags);
++ raid5_inc_bi_active_stripes(bi);
++ }
++ spin_unlock_irq(&sh->stripe_lock);
++ if (conf->mddev->bitmap) {
++ for (d = 0;
++ d < conf->raid_disks - conf->max_degraded;
++ d++)
++ bitmap_startwrite(mddev->bitmap,
++ sh->sector,
++ STRIPE_SECTORS,
++ 0);
++ sh->bm_seq = conf->seq_flush + 1;
++ set_bit(STRIPE_BIT_DELAY, &sh->state);
++ }
++
++ set_bit(STRIPE_HANDLE, &sh->state);
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ atomic_inc(&conf->preread_active_stripes);
++ release_stripe_plug(mddev, sh);
++ }
++
++ remaining = raid5_dec_bi_active_stripes(bi);
++ if (remaining == 0) {
++ md_write_end(mddev);
++ bio_endio(bi, 0);
++ }
++}
++
++static void make_request(struct mddev *mddev, struct bio * bi)
++{
++ struct r5conf *conf = mddev->private;
++ int dd_idx;
++ sector_t new_sector;
++ sector_t logical_sector, last_sector;
++ struct stripe_head *sh;
++ const int rw = bio_data_dir(bi);
++ int remaining;
++ DEFINE_WAIT(w);
++ bool do_prepare;
++
++ if (unlikely(bi->bi_rw & REQ_FLUSH)) {
++ md_flush_request(mddev, bi);
++ return;
++ }
++
++ md_write_start(mddev, bi);
++
++ if (rw == READ &&
++ mddev->reshape_position == MaxSector &&
++ chunk_aligned_read(mddev, bi))
++ return;
++
++ if (unlikely(bi->bi_rw & REQ_DISCARD)) {
++ make_discard_request(mddev, bi);
++ return;
++ }
++
++ logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
++ last_sector = bio_end_sector(bi);
++ bi->bi_next = NULL;
++ bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
++
++ prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
++ for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
++ int previous;
++ int seq;
++
++ do_prepare = false;
++ retry:
++ seq = read_seqcount_begin(&conf->gen_lock);
++ previous = 0;
++ if (do_prepare)
++ prepare_to_wait(&conf->wait_for_overlap, &w,
++ TASK_UNINTERRUPTIBLE);
++ if (unlikely(conf->reshape_progress != MaxSector)) {
++ /* spinlock is needed as reshape_progress may be
++ * 64bit on a 32bit platform, and so it might be
++ * possible to see a half-updated value
++ * Of course reshape_progress could change after
++ * the lock is dropped, so once we get a reference
++ * to the stripe that we think it is, we will have
++ * to check again.
++ */
++ spin_lock_irq(&conf->device_lock);
++ if (mddev->reshape_backwards
++ ? logical_sector < conf->reshape_progress
++ : logical_sector >= conf->reshape_progress) {
++ previous = 1;
++ } else {
++ if (mddev->reshape_backwards
++ ? logical_sector < conf->reshape_safe
++ : logical_sector >= conf->reshape_safe) {
++ spin_unlock_irq(&conf->device_lock);
++ schedule();
++ do_prepare = true;
++ goto retry;
++ }
++ }
++ spin_unlock_irq(&conf->device_lock);
++ }
++
++ new_sector = raid5_compute_sector(conf, logical_sector,
++ previous,
++ &dd_idx, NULL);
++ pr_debug("raid456: make_request, sector %llu logical %llu\n",
++ (unsigned long long)new_sector,
++ (unsigned long long)logical_sector);
++
++ sh = get_active_stripe(conf, new_sector, previous,
++ (bi->bi_rw&RWA_MASK), 0);
++ if (sh) {
++ if (unlikely(previous)) {
++ /* expansion might have moved on while waiting for a
++ * stripe, so we must do the range check again.
++ * Expansion could still move past after this
++ * test, but as we are holding a reference to
++ * 'sh', we know that if that happens,
++ * STRIPE_EXPANDING will get set and the expansion
++ * won't proceed until we finish with the stripe.
++ */
++ int must_retry = 0;
++ spin_lock_irq(&conf->device_lock);
++ if (mddev->reshape_backwards
++ ? logical_sector >= conf->reshape_progress
++ : logical_sector < conf->reshape_progress)
++ /* mismatch, need to try again */
++ must_retry = 1;
++ spin_unlock_irq(&conf->device_lock);
++ if (must_retry) {
++ release_stripe(sh);
++ schedule();
++ do_prepare = true;
++ goto retry;
++ }
++ }
++ if (read_seqcount_retry(&conf->gen_lock, seq)) {
++ /* Might have got the wrong stripe_head
++ * by accident
++ */
++ release_stripe(sh);
++ goto retry;
++ }
++
++ if (rw == WRITE &&
++ logical_sector >= mddev->suspend_lo &&
++ logical_sector < mddev->suspend_hi) {
++ release_stripe(sh);
++ /* As the suspend_* range is controlled by
++ * userspace, we want an interruptible
++ * wait.
++ */
++ flush_signals(current);
++ prepare_to_wait(&conf->wait_for_overlap,
++ &w, TASK_INTERRUPTIBLE);
++ if (logical_sector >= mddev->suspend_lo &&
++ logical_sector < mddev->suspend_hi) {
++ schedule();
++ do_prepare = true;
++ }
++ goto retry;
++ }
++
++ if (test_bit(STRIPE_EXPANDING, &sh->state) ||
++ !add_stripe_bio(sh, bi, dd_idx, rw)) {
++ /* Stripe is busy expanding or
++ * add failed due to overlap. Flush everything
++ * and wait a while
++ */
++ md_wakeup_thread(mddev->thread);
++ release_stripe(sh);
++ schedule();
++ do_prepare = true;
++ goto retry;
++ }
++ set_bit(STRIPE_HANDLE, &sh->state);
++ clear_bit(STRIPE_DELAYED, &sh->state);
++ if ((bi->bi_rw & REQ_SYNC) &&
++ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
++ atomic_inc(&conf->preread_active_stripes);
++ release_stripe_plug(mddev, sh);
++ } else {
++ /* cannot get stripe for read-ahead, just give up */
++ clear_bit(BIO_UPTODATE, &bi->bi_flags);
++ break;
++ }
++ }
++ finish_wait(&conf->wait_for_overlap, &w);
++
++ remaining = raid5_dec_bi_active_stripes(bi);
++ if (remaining == 0) {
++
++ if (rw == WRITE)
++ md_write_end(mddev);
++
++ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
++ bi, 0);
++ bio_endio(bi, 0);
++ }
++}
++
++static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
++
++static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
++{
++ /* reshaping is quite different to recovery/resync so it is
++ * handled quite separately ... here.
++ *
++ * On each call to sync_request, we gather one chunk worth of
++ * destination stripes and flag them as expanding.
++ * Then we find all the source stripes and request reads.
++ * As the reads complete, handle_stripe will copy the data
++ * into the destination stripe and release that stripe.
++ */
++ struct r5conf *conf = mddev->private;
++ struct stripe_head *sh;
++ sector_t first_sector, last_sector;
++ int raid_disks = conf->previous_raid_disks;
++ int data_disks = raid_disks - conf->max_degraded;
++ int new_data_disks = conf->raid_disks - conf->max_degraded;
++ int i;
++ int dd_idx;
++ sector_t writepos, readpos, safepos;
++ sector_t stripe_addr;
++ int reshape_sectors;
++ struct list_head stripes;
++
++ if (sector_nr == 0) {
++ /* If restarting in the middle, skip the initial sectors */
++ if (mddev->reshape_backwards &&
++ conf->reshape_progress < raid5_size(mddev, 0, 0)) {
++ sector_nr = raid5_size(mddev, 0, 0)
++ - conf->reshape_progress;
++ } else if (!mddev->reshape_backwards &&
++ conf->reshape_progress > 0)
++ sector_nr = conf->reshape_progress;
++ sector_div(sector_nr, new_data_disks);
++ if (sector_nr) {
++ mddev->curr_resync_completed = sector_nr;
++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++ *skipped = 1;
++ return sector_nr;
++ }
++ }
++
++ /* We need to process a full chunk at a time.
++ * If old and new chunk sizes differ, we need to process the
++ * largest of these
++ */
++ if (mddev->new_chunk_sectors > mddev->chunk_sectors)
++ reshape_sectors = mddev->new_chunk_sectors;
++ else
++ reshape_sectors = mddev->chunk_sectors;
++
++ /* We update the metadata at least every 10 seconds, or when
++ * the data about to be copied would over-write the source of
++ * the data at the front of the range. i.e. one new_stripe
++ * along from reshape_progress new_maps to after where
++ * reshape_safe old_maps to
++ */
++ writepos = conf->reshape_progress;
++ sector_div(writepos, new_data_disks);
++ readpos = conf->reshape_progress;
++ sector_div(readpos, data_disks);
++ safepos = conf->reshape_safe;
++ sector_div(safepos, data_disks);
++ if (mddev->reshape_backwards) {
++ writepos -= min_t(sector_t, reshape_sectors, writepos);
++ readpos += reshape_sectors;
++ safepos += reshape_sectors;
++ } else {
++ writepos += reshape_sectors;
++ readpos -= min_t(sector_t, reshape_sectors, readpos);
++ safepos -= min_t(sector_t, reshape_sectors, safepos);
++ }
++
++ /* Having calculated the 'writepos' possibly use it
++ * to set 'stripe_addr' which is where we will write to.
++ */
++ if (mddev->reshape_backwards) {
++ BUG_ON(conf->reshape_progress == 0);
++ stripe_addr = writepos;
++ BUG_ON((mddev->dev_sectors &
++ ~((sector_t)reshape_sectors - 1))
++ - reshape_sectors - stripe_addr
++ != sector_nr);
++ } else {
++ BUG_ON(writepos != sector_nr + reshape_sectors);
++ stripe_addr = sector_nr;
++ }
++
++ /* 'writepos' is the most advanced device address we might write.
++ * 'readpos' is the least advanced device address we might read.
++ * 'safepos' is the least address recorded in the metadata as having
++ * been reshaped.
++ * If there is a min_offset_diff, these are adjusted either by
++ * increasing the safepos/readpos if diff is negative, or
++ * increasing writepos if diff is positive.
++ * If 'readpos' is then behind 'writepos', there is no way that we can
++ * ensure safety in the face of a crash - that must be done by userspace
++ * making a backup of the data. So in that case there is no particular
++ * rush to update metadata.
++ * Otherwise if 'safepos' is behind 'writepos', then we really need to
++ * update the metadata to advance 'safepos' to match 'readpos' so that
++ * we can be safe in the event of a crash.
++ * So we insist on updating metadata if safepos is behind writepos and
++ * readpos is beyond writepos.
++ * In any case, update the metadata every 10 seconds.
++ * Maybe that number should be configurable, but I'm not sure it is
++ * worth it... maybe it could be a multiple of safemode_delay?
++ */
++ if (conf->min_offset_diff < 0) {
++ safepos += -conf->min_offset_diff;
++ readpos += -conf->min_offset_diff;
++ } else
++ writepos += conf->min_offset_diff;
++
++ if ((mddev->reshape_backwards
++ ? (safepos > writepos && readpos < writepos)
++ : (safepos < writepos && readpos > writepos)) ||
++ time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
++ /* Cannot proceed until we've updated the superblock... */
++ wait_event(conf->wait_for_overlap,
++ atomic_read(&conf->reshape_stripes)==0
++ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (atomic_read(&conf->reshape_stripes) != 0)
++ return 0;
++ mddev->reshape_position = conf->reshape_progress;
++ mddev->curr_resync_completed = sector_nr;
++ conf->reshape_checkpoint = jiffies;
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ md_wakeup_thread(mddev->thread);
++ wait_event(mddev->sb_wait, mddev->flags == 0 ||
++ test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
++ return 0;
++ spin_lock_irq(&conf->device_lock);
++ conf->reshape_safe = mddev->reshape_position;
++ spin_unlock_irq(&conf->device_lock);
++ wake_up(&conf->wait_for_overlap);
++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++ }
++
++ INIT_LIST_HEAD(&stripes);
++ for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
++ int j;
++ int skipped_disk = 0;
++ sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
++ set_bit(STRIPE_EXPANDING, &sh->state);
++ atomic_inc(&conf->reshape_stripes);
++ /* If any of this stripe is beyond the end of the old
++ * array, then we need to zero those blocks
++ */
++		for (j = sh->disks; j--;) {
++ sector_t s;
++ if (j == sh->pd_idx)
++ continue;
++ if (conf->level == 6 &&
++ j == sh->qd_idx)
++ continue;
++ s = compute_blocknr(sh, j, 0);
++ if (s < raid5_size(mddev, 0, 0)) {
++ skipped_disk = 1;
++ continue;
++ }
++ memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
++ set_bit(R5_Expanded, &sh->dev[j].flags);
++ set_bit(R5_UPTODATE, &sh->dev[j].flags);
++ }
++ if (!skipped_disk) {
++ set_bit(STRIPE_EXPAND_READY, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ }
++ list_add(&sh->lru, &stripes);
++ }
++ spin_lock_irq(&conf->device_lock);
++ if (mddev->reshape_backwards)
++ conf->reshape_progress -= reshape_sectors * new_data_disks;
++ else
++ conf->reshape_progress += reshape_sectors * new_data_disks;
++ spin_unlock_irq(&conf->device_lock);
++	/* Ok, those stripes are ready. We can start scheduling
++ * reads on the source stripes.
++ * The source stripes are determined by mapping the first and last
++ * block on the destination stripes.
++ */
++ first_sector =
++ raid5_compute_sector(conf, stripe_addr*(new_data_disks),
++ 1, &dd_idx, NULL);
++ last_sector =
++ raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
++ * new_data_disks - 1),
++ 1, &dd_idx, NULL);
++ if (last_sector >= mddev->dev_sectors)
++ last_sector = mddev->dev_sectors - 1;
++ while (first_sector <= last_sector) {
++ sh = get_active_stripe(conf, first_sector, 1, 0, 1);
++ set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++ release_stripe(sh);
++ first_sector += STRIPE_SECTORS;
++ }
++ /* Now that the sources are clearly marked, we can release
++ * the destination stripes
++ */
++ while (!list_empty(&stripes)) {
++ sh = list_entry(stripes.next, struct stripe_head, lru);
++ list_del_init(&sh->lru);
++ release_stripe(sh);
++ }
++ /* If this takes us to the resync_max point where we have to pause,
++ * then we need to write out the superblock.
++ */
++ sector_nr += reshape_sectors;
++ if ((sector_nr - mddev->curr_resync_completed) * 2
++ >= mddev->resync_max - mddev->curr_resync_completed) {
++ /* Cannot proceed until we've updated the superblock... */
++ wait_event(conf->wait_for_overlap,
++ atomic_read(&conf->reshape_stripes) == 0
++ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (atomic_read(&conf->reshape_stripes) != 0)
++ goto ret;
++ mddev->reshape_position = conf->reshape_progress;
++ mddev->curr_resync_completed = sector_nr;
++ conf->reshape_checkpoint = jiffies;
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ md_wakeup_thread(mddev->thread);
++ wait_event(mddev->sb_wait,
++ !test_bit(MD_CHANGE_DEVS, &mddev->flags)
++ || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
++ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
++ goto ret;
++ spin_lock_irq(&conf->device_lock);
++ conf->reshape_safe = mddev->reshape_position;
++ spin_unlock_irq(&conf->device_lock);
++ wake_up(&conf->wait_for_overlap);
++ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++ }
++ret:
++ return reshape_sectors;
++}
++
++/* FIXME go_faster isn't used */
++static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
++{
++ struct r5conf *conf = mddev->private;
++ struct stripe_head *sh;
++ sector_t max_sector = mddev->dev_sectors;
++ sector_t sync_blocks;
++ int still_degraded = 0;
++ int i;
++
++ if (sector_nr >= max_sector) {
++ /* just being told to finish up .. nothing much to do */
++
++ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
++ end_reshape(conf);
++ return 0;
++ }
++
++ if (mddev->curr_resync < max_sector) /* aborted */
++ bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
++ &sync_blocks, 1);
++ else /* completed sync */
++ conf->fullsync = 0;
++ bitmap_close_sync(mddev->bitmap);
++
++ return 0;
++ }
++
++ /* Allow raid5_quiesce to complete */
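++	/* (quiesce == 2 means raid5_quiesce() is still waiting for active
++	 * stripes to drain; starting new sync requests now would create
++	 * fresh active stripes and prevent it from finishing.)
++	 */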
++ wait_event(conf->wait_for_overlap, conf->quiesce != 2);
++
++ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
++ return reshape_request(mddev, sector_nr, skipped);
++
++ /* No need to check resync_max as we never do more than one
++ * stripe, and as resync_max will always be on a chunk boundary,
++ * if the check in md_do_sync didn't fire, there is no chance
++ * of overstepping resync_max here
++ */
++
++	/* if there are too many failed drives and we are trying
++ * to resync, then assert that we are finished, because there is
++ * nothing we can do.
++ */
++ if (mddev->degraded >= conf->max_degraded &&
++ test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
++ sector_t rv = mddev->dev_sectors - sector_nr;
++ *skipped = 1;
++ return rv;
++ }
++ if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
++ !conf->fullsync &&
++ !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
++ sync_blocks >= STRIPE_SECTORS) {
++ /* we can skip this block, and probably more */
++ sync_blocks /= STRIPE_SECTORS;
++ *skipped = 1;
++ return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
++ }
++
++ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
++
++ sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
++ if (sh == NULL) {
++ sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
++ /* make sure we don't swamp the stripe cache if someone else
++ * is trying to get access
++ */
++ schedule_timeout_uninterruptible(1);
++ }
++	/* Need to check if the array will still be degraded after recovery/resync.
++ * We don't need to check the 'failed' flag as when that gets set,
++ * recovery aborts.
++ */
++ for (i = 0; i < conf->raid_disks; i++)
++ if (conf->disks[i].rdev == NULL)
++ still_degraded = 1;
++
++ bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
++
++ set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
++ set_bit(STRIPE_HANDLE, &sh->state);
++
++ release_stripe(sh);
++
++ return STRIPE_SECTORS;
++}
++
++static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
++{
++ /* We may not be able to submit a whole bio at once as there
++ * may not be enough stripe_heads available.
++	 * We cannot pre-allocate enough stripe_heads as we may need
++	 * more than exist in the cache (if we allow ever larger chunks).
++ * So we do one stripe head at a time and record in
++ * ->bi_hw_segments how many have been done.
++ *
++ * We *know* that this entire raid_bio is in one chunk, so
++	 * it will use only one 'dd_idx' and need only one call to raid5_compute_sector.
++ */
++ struct stripe_head *sh;
++ int dd_idx;
++ sector_t sector, logical_sector, last_sector;
++ int scnt = 0;
++ int remaining;
++ int handled = 0;
++
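++	/* Round the start down to a stripe boundary; 'sector' below is the
++	 * corresponding physical sector, and both advance one stripe at a
++	 * time through the loop.
++	 */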
++ logical_sector = raid_bio->bi_iter.bi_sector &
++ ~((sector_t)STRIPE_SECTORS-1);
++ sector = raid5_compute_sector(conf, logical_sector,
++ 0, &dd_idx, NULL);
++ last_sector = bio_end_sector(raid_bio);
++
++ for (; logical_sector < last_sector;
++ logical_sector += STRIPE_SECTORS,
++ sector += STRIPE_SECTORS,
++ scnt++) {
++
++ if (scnt < raid5_bi_processed_stripes(raid_bio))
++ /* already done this stripe */
++ continue;
++
++ sh = get_active_stripe(conf, sector, 0, 1, 1);
++
++ if (!sh) {
++ /* failed to get a stripe - must wait */
++ raid5_set_bi_processed_stripes(raid_bio, scnt);
++ conf->retry_read_aligned = raid_bio;
++ return handled;
++ }
++
++ if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
++ release_stripe(sh);
++ raid5_set_bi_processed_stripes(raid_bio, scnt);
++ conf->retry_read_aligned = raid_bio;
++ return handled;
++ }
++
++ set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
++ handle_stripe(sh);
++ release_stripe(sh);
++ handled++;
++ }
++ remaining = raid5_dec_bi_active_stripes(raid_bio);
++ if (remaining == 0) {
++ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
++ raid_bio, 0);
++ bio_endio(raid_bio, 0);
++ }
++ if (atomic_dec_and_test(&conf->active_aligned_reads))
++ wake_up(&conf->wait_for_stripe);
++ return handled;
++}
++
++static int handle_active_stripes(struct r5conf *conf, int group,
++ struct r5worker *worker,
++ struct list_head *temp_inactive_list)
++{
++ struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
++ int i, batch_size = 0, hash;
++ bool release_inactive = false;
++
++ while (batch_size < MAX_STRIPE_BATCH &&
++ (sh = __get_priority_stripe(conf, group)) != NULL)
++ batch[batch_size++] = sh;
++
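++	/* Even with no stripes to handle we may still have stripes waiting
++	 * on the temp_inactive_list, so fall through once to release them
++	 * and then return.
++	 */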
++ if (batch_size == 0) {
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ if (!list_empty(temp_inactive_list + i))
++ break;
++ if (i == NR_STRIPE_HASH_LOCKS)
++ return batch_size;
++ release_inactive = true;
++ }
++ spin_unlock_irq(&conf->device_lock);
++
++ release_inactive_stripe_list(conf, temp_inactive_list,
++ NR_STRIPE_HASH_LOCKS);
++
++ if (release_inactive) {
++ spin_lock_irq(&conf->device_lock);
++ return 0;
++ }
++
++ for (i = 0; i < batch_size; i++)
++ handle_stripe(batch[i]);
++
++ cond_resched();
++
++ spin_lock_irq(&conf->device_lock);
++ for (i = 0; i < batch_size; i++) {
++ hash = batch[i]->hash_lock_index;
++ __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
++ }
++ return batch_size;
++}
++
++static void raid5_do_work(struct work_struct *work)
++{
++ struct r5worker *worker = container_of(work, struct r5worker, work);
++ struct r5worker_group *group = worker->group;
++ struct r5conf *conf = group->conf;
++ int group_id = group - conf->worker_groups;
++ int handled;
++ struct blk_plug plug;
++
++ pr_debug("+++ raid5worker active\n");
++
++ blk_start_plug(&plug);
++ handled = 0;
++ spin_lock_irq(&conf->device_lock);
++ while (1) {
++ int batch_size, released;
++
++ released = release_stripe_list(conf, worker->temp_inactive_list);
++
++ batch_size = handle_active_stripes(conf, group_id, worker,
++ worker->temp_inactive_list);
++ worker->working = false;
++ if (!batch_size && !released)
++ break;
++ handled += batch_size;
++ }
++ pr_debug("%d stripes handled\n", handled);
++
++ spin_unlock_irq(&conf->device_lock);
++ blk_finish_plug(&plug);
++
++ pr_debug("--- raid5worker inactive\n");
++}
++
++/*
++ * This is our raid5 kernel thread.
++ *
++ * We scan the hash table for stripes which can be handled now.
++ * During the scan, completed stripes are saved for us by the interrupt
++ * handler, so that they will not have to wait for our next wakeup.
++ */
++static void raid5d(struct md_thread *thread)
++{
++ struct mddev *mddev = thread->mddev;
++ struct r5conf *conf = mddev->private;
++ int handled;
++ struct blk_plug plug;
++
++ pr_debug("+++ raid5d active\n");
++
++ md_check_recovery(mddev);
++
++ blk_start_plug(&plug);
++ handled = 0;
++ spin_lock_irq(&conf->device_lock);
++ while (1) {
++ struct bio *bio;
++ int batch_size, released;
++
++ released = release_stripe_list(conf, conf->temp_inactive_list);
++
++		if (!list_empty(&conf->bitmap_list)) {
++ /* Now is a good time to flush some bitmap updates */
++ conf->seq_flush++;
++ spin_unlock_irq(&conf->device_lock);
++ bitmap_unplug(mddev->bitmap);
++ spin_lock_irq(&conf->device_lock);
++ conf->seq_write = conf->seq_flush;
++ activate_bit_delay(conf, conf->temp_inactive_list);
++ }
++ raid5_activate_delayed(conf);
++
++ while ((bio = remove_bio_from_retry(conf))) {
++ int ok;
++ spin_unlock_irq(&conf->device_lock);
++ ok = retry_aligned_read(conf, bio);
++ spin_lock_irq(&conf->device_lock);
++ if (!ok)
++ break;
++ handled++;
++ }
++
++ batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
++ conf->temp_inactive_list);
++ if (!batch_size && !released)
++ break;
++ handled += batch_size;
++
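++		/* Any flag other than MD_CHANGE_PENDING means there is
++		 * something for md_check_recovery() to do, e.g. write
++		 * out an updated superblock.
++		 */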
++ if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
++ spin_unlock_irq(&conf->device_lock);
++ md_check_recovery(mddev);
++ spin_lock_irq(&conf->device_lock);
++ }
++ }
++ pr_debug("%d stripes handled\n", handled);
++
++ spin_unlock_irq(&conf->device_lock);
++
++ async_tx_issue_pending_all();
++ blk_finish_plug(&plug);
++
++ pr_debug("--- raid5d inactive\n");
++}
++
++static ssize_t
++raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", conf->max_nr_stripes);
++ else
++ return 0;
++}
++
++int
++raid5_set_cache_size(struct mddev *mddev, int size)
++{
++ struct r5conf *conf = mddev->private;
++ int err;
++ int hash;
++
++ if (size <= 16 || size > 32768)
++ return -EINVAL;
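++	/* Stripes are added to the hash buckets round-robin, so start
++	 * shrinking at the bucket of the most recently grown stripe and
++	 * walk backwards to keep the buckets evenly filled.
++	 */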
++ hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
++ while (size < conf->max_nr_stripes) {
++ if (drop_one_stripe(conf, hash))
++ conf->max_nr_stripes--;
++ else
++ break;
++ hash--;
++ if (hash < 0)
++ hash = NR_STRIPE_HASH_LOCKS - 1;
++ }
++ err = md_allow_write(mddev);
++ if (err)
++ return err;
++ hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
++ while (size > conf->max_nr_stripes) {
++ if (grow_one_stripe(conf, hash))
++ conf->max_nr_stripes++;
++		else
++			break;
++ hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(raid5_set_cache_size);
++
++static ssize_t
++raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf = mddev->private;
++ unsigned long new;
++ int err;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (!conf)
++ return -ENODEV;
++
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++ err = raid5_set_cache_size(mddev, new);
++ if (err)
++ return err;
++ return len;
++}
++
++static struct md_sysfs_entry
++raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
++ raid5_show_stripe_cache_size,
++ raid5_store_stripe_cache_size);
++
++static ssize_t
++raid5_show_preread_threshold(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", conf->bypass_threshold);
++ else
++ return 0;
++}
++
++static ssize_t
++raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf = mddev->private;
++ unsigned long new;
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (!conf)
++ return -ENODEV;
++
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++ if (new > conf->max_nr_stripes)
++ return -EINVAL;
++ conf->bypass_threshold = new;
++ return len;
++}
++
++static struct md_sysfs_entry
++raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
++ S_IRUGO | S_IWUSR,
++ raid5_show_preread_threshold,
++ raid5_store_preread_threshold);
++
++static ssize_t
++raid5_show_skip_copy(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", conf->skip_copy);
++ else
++ return 0;
++}
++
++static ssize_t
++raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf = mddev->private;
++ unsigned long new;
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (!conf)
++ return -ENODEV;
++
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++ new = !!new;
++ if (new == conf->skip_copy)
++ return len;
++
++ mddev_suspend(mddev);
++ conf->skip_copy = new;
++ if (new)
++ mddev->queue->backing_dev_info.capabilities |=
++ BDI_CAP_STABLE_WRITES;
++ else
++ mddev->queue->backing_dev_info.capabilities &=
++ ~BDI_CAP_STABLE_WRITES;
++ mddev_resume(mddev);
++ return len;
++}
++
++static struct md_sysfs_entry
++raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
++ raid5_show_skip_copy,
++ raid5_store_skip_copy);
++
++static ssize_t
++stripe_cache_active_show(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
++ else
++ return 0;
++}
++
++static struct md_sysfs_entry
++raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
++
++static ssize_t
++raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
++{
++ struct r5conf *conf = mddev->private;
++ if (conf)
++ return sprintf(page, "%d\n", conf->worker_cnt_per_group);
++ else
++ return 0;
++}
++
++static int alloc_thread_groups(struct r5conf *conf, int cnt,
++ int *group_cnt,
++ int *worker_cnt_per_group,
++ struct r5worker_group **worker_groups);
++static ssize_t
++raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
++{
++ struct r5conf *conf = mddev->private;
++ unsigned long new;
++ int err;
++ struct r5worker_group *new_groups, *old_groups;
++ int group_cnt, worker_cnt_per_group;
++
++ if (len >= PAGE_SIZE)
++ return -EINVAL;
++ if (!conf)
++ return -ENODEV;
++
++ if (kstrtoul(page, 10, &new))
++ return -EINVAL;
++
++ if (new == conf->worker_cnt_per_group)
++ return len;
++
++ mddev_suspend(mddev);
++
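++	/* Make sure no worker from the old groups is still running before
++	 * the groups are swapped out and freed.
++	 */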
++ old_groups = conf->worker_groups;
++ if (old_groups)
++ flush_workqueue(raid5_wq);
++
++ err = alloc_thread_groups(conf, new,
++ &group_cnt, &worker_cnt_per_group,
++ &new_groups);
++ if (!err) {
++ spin_lock_irq(&conf->device_lock);
++ conf->group_cnt = group_cnt;
++ conf->worker_cnt_per_group = worker_cnt_per_group;
++ conf->worker_groups = new_groups;
++ spin_unlock_irq(&conf->device_lock);
++
++ if (old_groups)
++ kfree(old_groups[0].workers);
++ kfree(old_groups);
++ }
++
++ mddev_resume(mddev);
++
++ if (err)
++ return err;
++ return len;
++}
++
++static struct md_sysfs_entry
++raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR,
++ raid5_show_group_thread_cnt,
++ raid5_store_group_thread_cnt);
++
++static struct attribute *raid5_attrs[] = {
++ &raid5_stripecache_size.attr,
++ &raid5_stripecache_active.attr,
++ &raid5_preread_bypass_threshold.attr,
++ &raid5_group_thread_cnt.attr,
++ &raid5_skip_copy.attr,
++ NULL,
++};
++static struct attribute_group raid5_attrs_group = {
++ .name = NULL,
++ .attrs = raid5_attrs,
++};
++
++static int alloc_thread_groups(struct r5conf *conf, int cnt,
++ int *group_cnt,
++ int *worker_cnt_per_group,
++ struct r5worker_group **worker_groups)
++{
++ int i, j, k;
++ ssize_t size;
++ struct r5worker *workers;
++
++ *worker_cnt_per_group = cnt;
++ if (cnt == 0) {
++ *group_cnt = 0;
++ *worker_groups = NULL;
++ return 0;
++ }
++ *group_cnt = num_possible_nodes();
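++	/* One worker group per possible NUMA node; the workers for all
++	 * groups are allocated as a single flat array so that
++	 * free_thread_groups() can release them with one kfree().
++	 */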
++ size = sizeof(struct r5worker) * cnt;
++ workers = kzalloc(size * *group_cnt, GFP_NOIO);
++ *worker_groups = kzalloc(sizeof(struct r5worker_group) *
++ *group_cnt, GFP_NOIO);
++ if (!*worker_groups || !workers) {
++ kfree(workers);
++ kfree(*worker_groups);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < *group_cnt; i++) {
++ struct r5worker_group *group;
++
++ group = &(*worker_groups)[i];
++ INIT_LIST_HEAD(&group->handle_list);
++ group->conf = conf;
++ group->workers = workers + i * cnt;
++
++ for (j = 0; j < cnt; j++) {
++ struct r5worker *worker = group->workers + j;
++ worker->group = group;
++ INIT_WORK(&worker->work, raid5_do_work);
++
++ for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++)
++ INIT_LIST_HEAD(worker->temp_inactive_list + k);
++ }
++ }
++
++ return 0;
++}
++
++static void free_thread_groups(struct r5conf *conf)
++{
++ if (conf->worker_groups)
++ kfree(conf->worker_groups[0].workers);
++ kfree(conf->worker_groups);
++ conf->worker_groups = NULL;
++}
++
++static sector_t
++raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
++{
++ struct r5conf *conf = mddev->private;
++
++ if (!sectors)
++ sectors = mddev->dev_sectors;
++ if (!raid_disks)
++ /* size is defined by the smallest of previous and new size */
++ raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
++
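++	/* Round down so each device holds a whole number of chunks in
++	 * both the old and the new geometry.
++	 */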
++ sectors &= ~((sector_t)mddev->chunk_sectors - 1);
++ sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
++ return sectors * (raid_disks - conf->max_degraded);
++}
++
++static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ safe_put_page(percpu->spare_page);
++ kfree(percpu->scribble);
++ percpu->spare_page = NULL;
++ percpu->scribble = NULL;
++}
++
++static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
++{
++ if (conf->level == 6 && !percpu->spare_page)
++ percpu->spare_page = alloc_page(GFP_KERNEL);
++ if (!percpu->scribble)
++ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
++
++ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
++ free_scratch_buffer(conf, percpu);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void raid5_free_percpu(struct r5conf *conf)
++{
++ unsigned long cpu;
++
++ if (!conf->percpu)
++ return;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ unregister_cpu_notifier(&conf->cpu_notify);
++#endif
++
++ get_online_cpus();
++ for_each_possible_cpu(cpu)
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ put_online_cpus();
++
++ free_percpu(conf->percpu);
++}
++
++static void free_conf(struct r5conf *conf)
++{
++ free_thread_groups(conf);
++ shrink_stripes(conf);
++ raid5_free_percpu(conf);
++ kfree(conf->disks);
++ kfree(conf->stripe_hashtbl);
++ kfree(conf);
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
++ void *hcpu)
++{
++ struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify);
++ long cpu = (long)hcpu;
++ struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_PREPARE_FROZEN:
++ if (alloc_scratch_buffer(conf, percpu)) {
++ pr_err("%s: failed memory allocation for cpu%ld\n",
++ __func__, cpu);
++ return notifier_from_errno(-ENOMEM);
++ }
++ break;
++ case CPU_DEAD:
++ case CPU_DEAD_FROZEN:
++ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ break;
++ default:
++ break;
++ }
++ return NOTIFY_OK;
++}
++#endif
++
++static int raid5_alloc_percpu(struct r5conf *conf)
++{
++ unsigned long cpu;
++ int err = 0;
++
++ conf->percpu = alloc_percpu(struct raid5_percpu);
++ if (!conf->percpu)
++ return -ENOMEM;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ conf->cpu_notify.notifier_call = raid456_cpu_notify;
++ conf->cpu_notify.priority = 0;
++ err = register_cpu_notifier(&conf->cpu_notify);
++ if (err)
++ return err;
++#endif
++
++ get_online_cpus();
++ for_each_present_cpu(cpu) {
++ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
++ if (err) {
++ pr_err("%s: failed memory allocation for cpu%ld\n",
++ __func__, cpu);
++ break;
++ }
++ }
++ put_online_cpus();
++
++ return err;
++}
++
++static struct r5conf *setup_conf(struct mddev *mddev)
++{
++ struct r5conf *conf;
++ int raid_disk, memory, max_disks;
++ struct md_rdev *rdev;
++ struct disk_info *disk;
++ char pers_name[6];
++ int i;
++ int group_cnt, worker_cnt_per_group;
++ struct r5worker_group *new_group;
++
++ if (mddev->new_level != 5
++ && mddev->new_level != 4
++ && mddev->new_level != 6) {
++ printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
++ mdname(mddev), mddev->new_level);
++ return ERR_PTR(-EIO);
++ }
++ if ((mddev->new_level == 5
++ && !algorithm_valid_raid5(mddev->new_layout)) ||
++ (mddev->new_level == 6
++ && !algorithm_valid_raid6(mddev->new_layout))) {
++ printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
++ mdname(mddev), mddev->new_layout);
++ return ERR_PTR(-EIO);
++ }
++ if (mddev->new_level == 6 && mddev->raid_disks < 4) {
++ printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
++ mdname(mddev), mddev->raid_disks);
++ return ERR_PTR(-EINVAL);
++ }
++
++ if (!mddev->new_chunk_sectors ||
++ (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
++ !is_power_of_2(mddev->new_chunk_sectors)) {
++ printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
++ mdname(mddev), mddev->new_chunk_sectors << 9);
++ return ERR_PTR(-EINVAL);
++ }
++
++ conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
++ if (conf == NULL)
++ goto abort;
++	/* Don't enable multi-threading by default */
++ if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group,
++ &new_group)) {
++ conf->group_cnt = group_cnt;
++ conf->worker_cnt_per_group = worker_cnt_per_group;
++ conf->worker_groups = new_group;
++ } else
++ goto abort;
++ spin_lock_init(&conf->device_lock);
++ seqcount_init(&conf->gen_lock);
++ init_waitqueue_head(&conf->wait_for_stripe);
++ init_waitqueue_head(&conf->wait_for_overlap);
++ INIT_LIST_HEAD(&conf->handle_list);
++ INIT_LIST_HEAD(&conf->hold_list);
++ INIT_LIST_HEAD(&conf->delayed_list);
++ INIT_LIST_HEAD(&conf->bitmap_list);
++ init_llist_head(&conf->released_stripes);
++ atomic_set(&conf->active_stripes, 0);
++ atomic_set(&conf->preread_active_stripes, 0);
++ atomic_set(&conf->active_aligned_reads, 0);
++ conf->bypass_threshold = BYPASS_THRESHOLD;
++ conf->recovery_disabled = mddev->recovery_disabled - 1;
++
++ conf->raid_disks = mddev->raid_disks;
++ if (mddev->reshape_position == MaxSector)
++ conf->previous_raid_disks = mddev->raid_disks;
++ else
++ conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
++ max_disks = max(conf->raid_disks, conf->previous_raid_disks);
++ conf->scribble_len = scribble_len(max_disks);
++
++ conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
++ GFP_KERNEL);
++ if (!conf->disks)
++ goto abort;
++
++ conf->mddev = mddev;
++
++ if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
++ goto abort;
++
++	/* We init hash_locks[0] separately so that it can be used
++ * as the reference lock in the spin_lock_nest_lock() call
++ * in lock_all_device_hash_locks_irq in order to convince
++ * lockdep that we know what we are doing.
++ */
++ spin_lock_init(conf->hash_locks);
++ for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
++ spin_lock_init(conf->hash_locks + i);
++
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ INIT_LIST_HEAD(conf->inactive_list + i);
++
++ for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
++ INIT_LIST_HEAD(conf->temp_inactive_list + i);
++
++ conf->level = mddev->new_level;
++ if (raid5_alloc_percpu(conf) != 0)
++ goto abort;
++
++ pr_debug("raid456: run(%s) called.\n", mdname(mddev));
++
++ rdev_for_each(rdev, mddev) {
++ raid_disk = rdev->raid_disk;
++ if (raid_disk >= max_disks
++ || raid_disk < 0)
++ continue;
++ disk = conf->disks + raid_disk;
++
++ if (test_bit(Replacement, &rdev->flags)) {
++ if (disk->replacement)
++ goto abort;
++ disk->replacement = rdev;
++ } else {
++ if (disk->rdev)
++ goto abort;
++ disk->rdev = rdev;
++ }
++
++ if (test_bit(In_sync, &rdev->flags)) {
++ char b[BDEVNAME_SIZE];
++ printk(KERN_INFO "md/raid:%s: device %s operational as raid"
++ " disk %d\n",
++ mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
++ } else if (rdev->saved_raid_disk != raid_disk)
++ /* Cannot rely on bitmap to complete recovery */
++ conf->fullsync = 1;
++ }
++
++ conf->chunk_sectors = mddev->new_chunk_sectors;
++ conf->level = mddev->new_level;
++ if (conf->level == 6)
++ conf->max_degraded = 2;
++ else
++ conf->max_degraded = 1;
++ conf->algorithm = mddev->new_layout;
++ conf->reshape_progress = mddev->reshape_position;
++ if (conf->reshape_progress != MaxSector) {
++ conf->prev_chunk_sectors = mddev->chunk_sectors;
++ conf->prev_algo = mddev->layout;
++ }
++
++ memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
++ max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
++ atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
++ if (grow_stripes(conf, NR_STRIPES)) {
++ printk(KERN_ERR
++ "md/raid:%s: couldn't allocate %dkB for buffers\n",
++ mdname(mddev), memory);
++ goto abort;
++ } else
++ printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
++ mdname(mddev), memory);
++
++ sprintf(pers_name, "raid%d", mddev->new_level);
++ conf->thread = md_register_thread(raid5d, mddev, pers_name);
++ if (!conf->thread) {
++ printk(KERN_ERR
++ "md/raid:%s: couldn't allocate thread.\n",
++ mdname(mddev));
++ goto abort;
++ }
++
++ return conf;
++
++ abort:
++ if (conf) {
++ free_conf(conf);
++ return ERR_PTR(-EIO);
++ } else
++ return ERR_PTR(-ENOMEM);
++}
++
++static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
++{
++ switch (algo) {
++ case ALGORITHM_PARITY_0:
++ if (raid_disk < max_degraded)
++ return 1;
++ break;
++ case ALGORITHM_PARITY_N:
++ if (raid_disk >= raid_disks - max_degraded)
++ return 1;
++ break;
++ case ALGORITHM_PARITY_0_6:
++ if (raid_disk == 0 ||
++ raid_disk == raid_disks - 1)
++ return 1;
++ break;
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ if (raid_disk == raid_disks - 1)
++ return 1;
++ }
++ return 0;
++}
++
++static int run(struct mddev *mddev)
++{
++ struct r5conf *conf;
++ int working_disks = 0;
++ int dirty_parity_disks = 0;
++ struct md_rdev *rdev;
++ sector_t reshape_offset = 0;
++ int i;
++ long long min_offset_diff = 0;
++ int first = 1;
++
++ if (mddev->recovery_cp != MaxSector)
++ printk(KERN_NOTICE "md/raid:%s: not clean"
++ " -- starting background reconstruction\n",
++ mdname(mddev));
++
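++	/* Record the smallest (for a backwards reshape) or largest
++	 * (forwards) difference between new_data_offset and data_offset
++	 * across all devices; it is used by the reshape checks below.
++	 */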
++ rdev_for_each(rdev, mddev) {
++ long long diff;
++ if (rdev->raid_disk < 0)
++ continue;
++ diff = (rdev->new_data_offset - rdev->data_offset);
++ if (first) {
++ min_offset_diff = diff;
++ first = 0;
++ } else if (mddev->reshape_backwards &&
++ diff < min_offset_diff)
++ min_offset_diff = diff;
++ else if (!mddev->reshape_backwards &&
++ diff > min_offset_diff)
++ min_offset_diff = diff;
++ }
++
++ if (mddev->reshape_position != MaxSector) {
++ /* Check that we can continue the reshape.
++ * Difficulties arise if the stripe we would write to
++ * next is at or after the stripe we would read from next.
++ * For a reshape that changes the number of devices, this
++ * is only possible for a very short time, and mdadm makes
++		 * sure that time appears to have passed before assembling
++ * the array. So we fail if that time hasn't passed.
++ * For a reshape that keeps the number of devices the same
++		 * mdadm must be monitoring the reshape and keeping the
++ * critical areas read-only and backed up. It will start
++ * the array in read-only mode, so we check for that.
++ */
++ sector_t here_new, here_old;
++ int old_disks;
++ int max_degraded = (mddev->level == 6 ? 2 : 1);
++
++ if (mddev->new_level != mddev->level) {
++ printk(KERN_ERR "md/raid:%s: unsupported reshape "
++ "required - aborting.\n",
++ mdname(mddev));
++ return -EINVAL;
++ }
++ old_disks = mddev->raid_disks - mddev->delta_disks;
++ /* reshape_position must be on a new-stripe boundary, and one
++ * further up in new geometry must map after here in old
++ * geometry.
++ */
++ here_new = mddev->reshape_position;
++ if (sector_div(here_new, mddev->new_chunk_sectors *
++ (mddev->raid_disks - max_degraded))) {
++ printk(KERN_ERR "md/raid:%s: reshape_position not "
++ "on a stripe boundary\n", mdname(mddev));
++ return -EINVAL;
++ }
++ reshape_offset = here_new * mddev->new_chunk_sectors;
++ /* here_new is the stripe we will write to */
++ here_old = mddev->reshape_position;
++ sector_div(here_old, mddev->chunk_sectors *
++ (old_disks-max_degraded));
++ /* here_old is the first stripe that we might need to read
++ * from */
++ if (mddev->delta_disks == 0) {
++ if ((here_new * mddev->new_chunk_sectors !=
++ here_old * mddev->chunk_sectors)) {
++ printk(KERN_ERR "md/raid:%s: reshape position is"
++ " confused - aborting\n", mdname(mddev));
++ return -EINVAL;
++ }
++ /* We cannot be sure it is safe to start an in-place
++ * reshape. It is only safe if user-space is monitoring
++ * and taking constant backups.
++ * mdadm always starts a situation like this in
++ * readonly mode so it can take control before
++ * allowing any writes. So just check for that.
++ */
++ if (abs(min_offset_diff) >= mddev->chunk_sectors &&
++ abs(min_offset_diff) >= mddev->new_chunk_sectors)
++ /* not really in-place - so OK */;
++ else if (mddev->ro == 0) {
++ printk(KERN_ERR "md/raid:%s: in-place reshape "
++ "must be started in read-only mode "
++ "- aborting\n",
++ mdname(mddev));
++ return -EINVAL;
++ }
++ } else if (mddev->reshape_backwards
++ ? (here_new * mddev->new_chunk_sectors + min_offset_diff <=
++ here_old * mddev->chunk_sectors)
++ : (here_new * mddev->new_chunk_sectors >=
++ here_old * mddev->chunk_sectors + (-min_offset_diff))) {
++ /* Reading from the same stripe as writing to - bad */
++ printk(KERN_ERR "md/raid:%s: reshape_position too early for "
++ "auto-recovery - aborting.\n",
++ mdname(mddev));
++ return -EINVAL;
++ }
++ printk(KERN_INFO "md/raid:%s: reshape will continue\n",
++ mdname(mddev));
++		/* OK, we should be able to continue */
++ } else {
++ BUG_ON(mddev->level != mddev->new_level);
++ BUG_ON(mddev->layout != mddev->new_layout);
++ BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
++ BUG_ON(mddev->delta_disks != 0);
++ }
++
++ if (mddev->private == NULL)
++ conf = setup_conf(mddev);
++ else
++ conf = mddev->private;
++
++ if (IS_ERR(conf))
++ return PTR_ERR(conf);
++
++ conf->min_offset_diff = min_offset_diff;
++ mddev->thread = conf->thread;
++ conf->thread = NULL;
++ mddev->private = conf;
++
++ for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
++ i++) {
++ rdev = conf->disks[i].rdev;
++ if (!rdev && conf->disks[i].replacement) {
++ /* The replacement is all we have yet */
++ rdev = conf->disks[i].replacement;
++ conf->disks[i].replacement = NULL;
++ clear_bit(Replacement, &rdev->flags);
++ conf->disks[i].rdev = rdev;
++ }
++ if (!rdev)
++ continue;
++ if (conf->disks[i].replacement &&
++ conf->reshape_progress != MaxSector) {
++ /* replacements and reshape simply do not mix. */
++ printk(KERN_ERR "md: cannot handle concurrent "
++ "replacement and reshape.\n");
++ goto abort;
++ }
++ if (test_bit(In_sync, &rdev->flags)) {
++ working_disks++;
++ continue;
++ }
++		/* This disk is not fully in-sync. However if it
++		 * just stored parity (beyond the recovery_offset),
++		 * then we don't need to be concerned about the
++ * array being dirty.
++ * When reshape goes 'backwards', we never have
++ * partially completed devices, so we only need
++ * to worry about reshape going forwards.
++ */
++ /* Hack because v0.91 doesn't store recovery_offset properly. */
++ if (mddev->major_version == 0 &&
++ mddev->minor_version > 90)
++ rdev->recovery_offset = reshape_offset;
++
++ if (rdev->recovery_offset < reshape_offset) {
++ /* We need to check old and new layout */
++ if (!only_parity(rdev->raid_disk,
++ conf->algorithm,
++ conf->raid_disks,
++ conf->max_degraded))
++ continue;
++ }
++ if (!only_parity(rdev->raid_disk,
++ conf->prev_algo,
++ conf->previous_raid_disks,
++ conf->max_degraded))
++ continue;
++ dirty_parity_disks++;
++ }
++
++ /*
++ * 0 for a fully functional array, 1 or 2 for a degraded array.
++ */
++ mddev->degraded = calc_degraded(conf);
++
++ if (has_failed(conf)) {
++ printk(KERN_ERR "md/raid:%s: not enough operational devices"
++ " (%d/%d failed)\n",
++ mdname(mddev), mddev->degraded, conf->raid_disks);
++ goto abort;
++ }
++
++ /* device size must be a multiple of chunk size */
++ mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
++ mddev->resync_max_sectors = mddev->dev_sectors;
++
++ if (mddev->degraded > dirty_parity_disks &&
++ mddev->recovery_cp != MaxSector) {
++ if (mddev->ok_start_degraded)
++ printk(KERN_WARNING
++ "md/raid:%s: starting dirty degraded array"
++ " - data corruption possible.\n",
++ mdname(mddev));
++ else {
++ printk(KERN_ERR
++ "md/raid:%s: cannot start dirty degraded array.\n",
++ mdname(mddev));
++ goto abort;
++ }
++ }
++
++ if (mddev->degraded == 0)
++ printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
++ " devices, algorithm %d\n", mdname(mddev), conf->level,
++ mddev->raid_disks-mddev->degraded, mddev->raid_disks,
++ mddev->new_layout);
++ else
++ printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
++ " out of %d devices, algorithm %d\n",
++ mdname(mddev), conf->level,
++ mddev->raid_disks - mddev->degraded,
++ mddev->raid_disks, mddev->new_layout);
++
++ print_raid5_conf(conf);
++
++ if (conf->reshape_progress != MaxSector) {
++ conf->reshape_safe = conf->reshape_progress;
++ atomic_set(&conf->reshape_stripes, 0);
++ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
++ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
++ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
++ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
++ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
++ "reshape");
++ }
++
++ /* Ok, everything is just fine now */
++ if (mddev->to_remove == &raid5_attrs_group)
++ mddev->to_remove = NULL;
++ else if (mddev->kobj.sd &&
++ sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
++ printk(KERN_WARNING
++ "raid5: failed to create sysfs attributes for %s\n",
++ mdname(mddev));
++ md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
++
++ if (mddev->queue) {
++ int chunk_size;
++ bool discard_supported = true;
++ /* read-ahead size must cover two whole stripes, which
++		 * is 2 * (number of data disks) * chunksize
++ */
++ int data_disks = conf->previous_raid_disks - conf->max_degraded;
++ int stripe = data_disks *
++ ((mddev->chunk_sectors << 9) / PAGE_SIZE);
++ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
++ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
++
++ blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
++
++ mddev->queue->backing_dev_info.congested_data = mddev;
++ mddev->queue->backing_dev_info.congested_fn = raid5_congested;
++
++ chunk_size = mddev->chunk_sectors << 9;
++ blk_queue_io_min(mddev->queue, chunk_size);
++ blk_queue_io_opt(mddev->queue, chunk_size *
++ (conf->raid_disks - conf->max_degraded));
++ mddev->queue->limits.raid_partial_stripes_expensive = 1;
++ /*
++ * We can only discard a whole stripe. It doesn't make sense to
++		 * discard the data disks but still write the parity disk
++ */
++ stripe = stripe * PAGE_SIZE;
++ /* Round up to power of 2, as discard handling
++ * currently assumes that */
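++		/* (stripe | (stripe-1)) sets every bit below the highest set
++		 * bit, so adding 1 carries through to the next power of two;
++		 * if stripe is already a power of two the loop never runs.
++		 */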
++ while ((stripe-1) & stripe)
++ stripe = (stripe | (stripe-1)) + 1;
++ mddev->queue->limits.discard_alignment = stripe;
++ mddev->queue->limits.discard_granularity = stripe;
++ /*
++ * unaligned part of discard request will be ignored, so can't
++ * guarantee discard_zeroes_data
++ */
++ mddev->queue->limits.discard_zeroes_data = 0;
++
++ blk_queue_max_write_same_sectors(mddev->queue, 0);
++
++ rdev_for_each(rdev, mddev) {
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->data_offset << 9);
++ disk_stack_limits(mddev->gendisk, rdev->bdev,
++ rdev->new_data_offset << 9);
++ /*
++ * discard_zeroes_data is required, otherwise data
++ * could be lost. Consider a scenario: discard a stripe
++ * (the stripe could be inconsistent if
++ * discard_zeroes_data is 0); write one disk of the
++ * stripe (the stripe could be inconsistent again
++ * depending on which disks are used to calculate
++			 * parity); the disk is broken; the stripe data of this
++ * disk is lost.
++ */
++ if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
++ !bdev_get_queue(rdev->bdev)->
++ limits.discard_zeroes_data)
++ discard_supported = false;
++ /* Unfortunately, discard_zeroes_data is not currently
++ * a guarantee - just a hint. So we only allow DISCARD
++ * if the sysadmin has confirmed that only safe devices
++ * are in use by setting a module parameter.
++ */
++ if (!devices_handle_discard_safely) {
++ if (discard_supported) {
++ pr_info("md/raid456: discard support disabled due to uncertainty.\n");
++ pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
++ }
++ discard_supported = false;
++ }
++ }
++
++ if (discard_supported &&
++ mddev->queue->limits.max_discard_sectors >= stripe &&
++ mddev->queue->limits.discard_granularity >= stripe)
++ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
++ mddev->queue);
++ else
++ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
++ mddev->queue);
++ }
++
++ return 0;
++abort:
++ md_unregister_thread(&mddev->thread);
++ print_raid5_conf(conf);
++ free_conf(conf);
++ mddev->private = NULL;
++ printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
++ return -EIO;
++}
++
++static int stop(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++
++ md_unregister_thread(&mddev->thread);
++ if (mddev->queue)
++ mddev->queue->backing_dev_info.congested_fn = NULL;
++ free_conf(conf);
++ mddev->private = NULL;
++ mddev->to_remove = &raid5_attrs_group;
++ return 0;
++}
++
++static void status(struct seq_file *seq, struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++ int i;
++
++ seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
++ mddev->chunk_sectors / 2, mddev->layout);
++	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
++ for (i = 0; i < conf->raid_disks; i++)
++		seq_printf(seq, "%s",
++ conf->disks[i].rdev &&
++ test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
++	seq_printf(seq, "]");
++}
++
++static void print_raid5_conf(struct r5conf *conf)
++{
++ int i;
++ struct disk_info *tmp;
++
++ printk(KERN_DEBUG "RAID conf printout:\n");
++ if (!conf) {
++ printk("(conf==NULL)\n");
++ return;
++ }
++ printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
++ conf->raid_disks,
++ conf->raid_disks - conf->mddev->degraded);
++
++ for (i = 0; i < conf->raid_disks; i++) {
++ char b[BDEVNAME_SIZE];
++ tmp = conf->disks + i;
++ if (tmp->rdev)
++ printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
++ i, !test_bit(Faulty, &tmp->rdev->flags),
++ bdevname(tmp->rdev->bdev, b));
++ }
++}
++
++static int raid5_spare_active(struct mddev *mddev)
++{
++ int i;
++ struct r5conf *conf = mddev->private;
++ struct disk_info *tmp;
++ int count = 0;
++ unsigned long flags;
++
++ for (i = 0; i < conf->raid_disks; i++) {
++ tmp = conf->disks + i;
++ if (tmp->replacement
++ && tmp->replacement->recovery_offset == MaxSector
++ && !test_bit(Faulty, &tmp->replacement->flags)
++ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
++ /* Replacement has just become active. */
++ if (!tmp->rdev
++ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
++ count++;
++ if (tmp->rdev) {
++ /* Replaced device not technically faulty,
++ * but we need to be sure it gets removed
++ * and never re-added.
++ */
++ set_bit(Faulty, &tmp->rdev->flags);
++ sysfs_notify_dirent_safe(
++ tmp->rdev->sysfs_state);
++ }
++ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
++ } else if (tmp->rdev
++ && tmp->rdev->recovery_offset == MaxSector
++ && !test_bit(Faulty, &tmp->rdev->flags)
++ && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
++ count++;
++ sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
++ }
++ }
++ spin_lock_irqsave(&conf->device_lock, flags);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ print_raid5_conf(conf);
++ return count;
++}
++
++static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
++{
++ struct r5conf *conf = mddev->private;
++ int err = 0;
++ int number = rdev->raid_disk;
++ struct md_rdev **rdevp;
++ struct disk_info *p = conf->disks + number;
++
++ print_raid5_conf(conf);
++ if (rdev == p->rdev)
++ rdevp = &p->rdev;
++ else if (rdev == p->replacement)
++ rdevp = &p->replacement;
++ else
++ return 0;
++
++ if (number >= conf->raid_disks &&
++ conf->reshape_progress == MaxSector)
++ clear_bit(In_sync, &rdev->flags);
++
++ if (test_bit(In_sync, &rdev->flags) ||
++ atomic_read(&rdev->nr_pending)) {
++ err = -EBUSY;
++ goto abort;
++ }
++ /* Only remove non-faulty devices if recovery
++ * isn't possible.
++ */
++ if (!test_bit(Faulty, &rdev->flags) &&
++ mddev->recovery_disabled != conf->recovery_disabled &&
++ !has_failed(conf) &&
++ (!p->replacement || p->replacement == rdev) &&
++ number < conf->raid_disks) {
++ err = -EBUSY;
++ goto abort;
++ }
++ *rdevp = NULL;
++ synchronize_rcu();
++ if (atomic_read(&rdev->nr_pending)) {
++ /* lost the race, try later */
++ err = -EBUSY;
++ *rdevp = rdev;
++ } else if (p->replacement) {
++ /* We must have just cleared 'rdev' */
++ p->rdev = p->replacement;
++ clear_bit(Replacement, &p->replacement->flags);
++ smp_mb(); /* Make sure other CPUs may see both as identical
++ * but will never see neither - if they are careful
++ */
++ p->replacement = NULL;
++ clear_bit(WantReplacement, &rdev->flags);
++ } else
++		/* We might have just removed the Replacement as faulty -
++ * clear the bit just in case
++ */
++ clear_bit(WantReplacement, &rdev->flags);
++abort:
++
++ print_raid5_conf(conf);
++ return err;
++}
++
++static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
++{
++ struct r5conf *conf = mddev->private;
++ int err = -EEXIST;
++ int disk;
++ struct disk_info *p;
++ int first = 0;
++ int last = conf->raid_disks - 1;
++
++ if (mddev->recovery_disabled == conf->recovery_disabled)
++ return -EBUSY;
++
++ if (rdev->saved_raid_disk < 0 && has_failed(conf))
++ /* no point adding a device */
++ return -EINVAL;
++
++ if (rdev->raid_disk >= 0)
++ first = last = rdev->raid_disk;
++
++ /*
++ * find the disk ... but prefer rdev->saved_raid_disk
++ * if possible.
++ */
++ if (rdev->saved_raid_disk >= 0 &&
++ rdev->saved_raid_disk >= first &&
++ conf->disks[rdev->saved_raid_disk].rdev == NULL)
++ first = rdev->saved_raid_disk;
++
++ for (disk = first; disk <= last; disk++) {
++ p = conf->disks + disk;
++ if (p->rdev == NULL) {
++ clear_bit(In_sync, &rdev->flags);
++ rdev->raid_disk = disk;
++ err = 0;
++ if (rdev->saved_raid_disk != disk)
++ conf->fullsync = 1;
++ rcu_assign_pointer(p->rdev, rdev);
++ goto out;
++ }
++ }
++ for (disk = first; disk <= last; disk++) {
++ p = conf->disks + disk;
++ if (test_bit(WantReplacement, &p->rdev->flags) &&
++ p->replacement == NULL) {
++ clear_bit(In_sync, &rdev->flags);
++ set_bit(Replacement, &rdev->flags);
++ rdev->raid_disk = disk;
++ err = 0;
++ conf->fullsync = 1;
++ rcu_assign_pointer(p->replacement, rdev);
++ break;
++ }
++ }
++out:
++ print_raid5_conf(conf);
++ return err;
++}
++
++static int raid5_resize(struct mddev *mddev, sector_t sectors)
++{
++ /* no resync is happening, and there is enough space
++ * on all devices, so we can resize.
++ * We need to make sure resync covers any new space.
++ * If the array is shrinking we should possibly wait until
++ * any io in the removed space completes, but it hardly seems
++ * worth it.
++ */
++ sector_t newsize;
++ sectors &= ~((sector_t)mddev->chunk_sectors - 1);
++ newsize = raid5_size(mddev, sectors, mddev->raid_disks);
++ if (mddev->external_size &&
++ mddev->array_sectors > newsize)
++ return -EINVAL;
++ if (mddev->bitmap) {
++ int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0);
++ if (ret)
++ return ret;
++ }
++ md_set_array_sectors(mddev, newsize);
++ set_capacity(mddev->gendisk, mddev->array_sectors);
++ revalidate_disk(mddev->gendisk);
++ if (sectors > mddev->dev_sectors &&
++ mddev->recovery_cp > mddev->dev_sectors) {
++ mddev->recovery_cp = mddev->dev_sectors;
++ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
++ }
++ mddev->dev_sectors = sectors;
++ mddev->resync_max_sectors = sectors;
++ return 0;
++}
++
++static int check_stripe_cache(struct mddev *mddev)
++{
++ /* Can only proceed if there are plenty of stripe_heads.
++	 * We need a minimum of one full stripe, and for sensible progress
++ * it is best to have about 4 times that.
++ * If we require 4 times, then the default 256 4K stripe_heads will
++ * allow for chunk sizes up to 256K, which is probably OK.
++ * If the chunk size is greater, user-space should request more
++ * stripe_heads first.
++ */
++ struct r5conf *conf = mddev->private;
++ if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
++ > conf->max_nr_stripes ||
++ ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
++ > conf->max_nr_stripes) {
++ printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
++ mdname(mddev),
++ ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
++ / STRIPE_SIZE)*4);
++ return 0;
++ }
++ return 1;
++}
++
++static int check_reshape(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++
++ if (mddev->delta_disks == 0 &&
++ mddev->new_layout == mddev->layout &&
++ mddev->new_chunk_sectors == mddev->chunk_sectors)
++ return 0; /* nothing to do */
++ if (has_failed(conf))
++ return -EINVAL;
++ if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
++ /* We might be able to shrink, but the devices must
++ * be made bigger first.
++ * For raid6, 4 is the minimum size.
++ * Otherwise 2 is the minimum
++ */
++ int min = 2;
++ if (mddev->level == 6)
++ min = 4;
++ if (mddev->raid_disks + mddev->delta_disks < min)
++ return -EINVAL;
++ }
++
++ if (!check_stripe_cache(mddev))
++ return -ENOSPC;
++
++ return resize_stripes(conf, (conf->previous_raid_disks
++ + mddev->delta_disks));
++}
++
++static int raid5_start_reshape(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++ struct md_rdev *rdev;
++ int spares = 0;
++ unsigned long flags;
++
++ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
++ return -EBUSY;
++
++ if (!check_stripe_cache(mddev))
++ return -ENOSPC;
++
++ if (has_failed(conf))
++ return -EINVAL;
++
++ rdev_for_each(rdev, mddev) {
++ if (!test_bit(In_sync, &rdev->flags)
++ && !test_bit(Faulty, &rdev->flags))
++ spares++;
++ }
++
++ if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
++ /* Not enough devices even to make a degraded array
++ * of that size
++ */
++ return -EINVAL;
++
++ /* Refuse to reduce size of the array. Any reductions in
++ * array size must be through explicit setting of array_size
++ * attribute.
++ */
++ if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
++ < mddev->array_sectors) {
++ printk(KERN_ERR "md/raid:%s: array size must be reduced "
++ "before number of disks\n", mdname(mddev));
++ return -EINVAL;
++ }
++
++ atomic_set(&conf->reshape_stripes, 0);
++ spin_lock_irq(&conf->device_lock);
++ write_seqcount_begin(&conf->gen_lock);
++ conf->previous_raid_disks = conf->raid_disks;
++ conf->raid_disks += mddev->delta_disks;
++ conf->prev_chunk_sectors = conf->chunk_sectors;
++ conf->chunk_sectors = mddev->new_chunk_sectors;
++ conf->prev_algo = conf->algorithm;
++ conf->algorithm = mddev->new_layout;
++ conf->generation++;
++ /* Code that selects data_offset needs to see the generation update
++ * if reshape_progress has been set - so a memory barrier needed.
++ */
++ smp_mb();
++ if (mddev->reshape_backwards)
++ conf->reshape_progress = raid5_size(mddev, 0, 0);
++ else
++ conf->reshape_progress = 0;
++ conf->reshape_safe = conf->reshape_progress;
++ write_seqcount_end(&conf->gen_lock);
++ spin_unlock_irq(&conf->device_lock);
++
++ /* Now make sure any requests that proceeded on the assumption
++ * the reshape wasn't running - like Discard or Read - have
++ * completed.
++ */
++ mddev_suspend(mddev);
++ mddev_resume(mddev);
++
++ /* Add some new drives, as many as will fit.
++ * We know there are enough to make the newly sized array work.
++ * Don't add devices if we are reducing the number of
++ * devices in the array. This is because it is not possible
++ * to correctly record the "partially reconstructed" state of
++ * such devices during the reshape and confusion could result.
++ */
++ if (mddev->delta_disks >= 0) {
++ rdev_for_each(rdev, mddev)
++ if (rdev->raid_disk < 0 &&
++ !test_bit(Faulty, &rdev->flags)) {
++ if (raid5_add_disk(mddev, rdev) == 0) {
++ if (rdev->raid_disk
++ >= conf->previous_raid_disks)
++ set_bit(In_sync, &rdev->flags);
++ else
++ rdev->recovery_offset = 0;
++
++ if (sysfs_link_rdev(mddev, rdev))
++ /* Failure here is OK */;
++ }
++ } else if (rdev->raid_disk >= conf->previous_raid_disks
++ && !test_bit(Faulty, &rdev->flags)) {
++ /* This is a spare that was manually added */
++ set_bit(In_sync, &rdev->flags);
++ }
++
++ /* When a reshape changes the number of devices,
++ * ->degraded is measured against the larger of the
++ * pre and post number of devices.
++ */
++ spin_lock_irqsave(&conf->device_lock, flags);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irqrestore(&conf->device_lock, flags);
++ }
++ mddev->raid_disks = conf->raid_disks;
++ mddev->reshape_position = conf->reshape_progress;
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++
++ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
++ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
++ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
++ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
++ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
++ "reshape");
++ if (!mddev->sync_thread) {
++ mddev->recovery = 0;
++ spin_lock_irq(&conf->device_lock);
++ write_seqcount_begin(&conf->gen_lock);
++ mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
++ mddev->new_chunk_sectors =
++ conf->chunk_sectors = conf->prev_chunk_sectors;
++ mddev->new_layout = conf->algorithm = conf->prev_algo;
++ rdev_for_each(rdev, mddev)
++ rdev->new_data_offset = rdev->data_offset;
++ smp_wmb();
++		conf->generation--;
++ conf->reshape_progress = MaxSector;
++ mddev->reshape_position = MaxSector;
++ write_seqcount_end(&conf->gen_lock);
++ spin_unlock_irq(&conf->device_lock);
++ return -EAGAIN;
++ }
++ conf->reshape_checkpoint = jiffies;
++ md_wakeup_thread(mddev->sync_thread);
++ md_new_event(mddev);
++ return 0;
++}
++
++/* This is called from the reshape thread and should make any
++ * changes needed in 'conf'
++ */
++static void end_reshape(struct r5conf *conf)
++{
++
++ if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
++ struct md_rdev *rdev;
++
++ spin_lock_irq(&conf->device_lock);
++ conf->previous_raid_disks = conf->raid_disks;
++ rdev_for_each(rdev, conf->mddev)
++ rdev->data_offset = rdev->new_data_offset;
++ smp_wmb();
++ conf->reshape_progress = MaxSector;
++ spin_unlock_irq(&conf->device_lock);
++ wake_up(&conf->wait_for_overlap);
++
++ /* read-ahead size must cover two whole stripes, which is
++		 * 2 * (number of data disks) * chunksize
++ */
++ if (conf->mddev->queue) {
++ int data_disks = conf->raid_disks - conf->max_degraded;
++ int stripe = data_disks * ((conf->chunk_sectors << 9)
++ / PAGE_SIZE);
++ if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
++ conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
++ }
++ }
++}
++
++/* This is called from the raid5d thread with mddev_lock held.
++ * It makes config changes to the device.
++ */
++static void raid5_finish_reshape(struct mddev *mddev)
++{
++ struct r5conf *conf = mddev->private;
++
++ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
++
++ if (mddev->delta_disks > 0) {
++ md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
++ set_capacity(mddev->gendisk, mddev->array_sectors);
++ revalidate_disk(mddev->gendisk);
++ } else {
++ int d;
++ spin_lock_irq(&conf->device_lock);
++ mddev->degraded = calc_degraded(conf);
++ spin_unlock_irq(&conf->device_lock);
++ for (d = conf->raid_disks ;
++ d < conf->raid_disks - mddev->delta_disks;
++ d++) {
++ struct md_rdev *rdev = conf->disks[d].rdev;
++ if (rdev)
++ clear_bit(In_sync, &rdev->flags);
++ rdev = conf->disks[d].replacement;
++ if (rdev)
++ clear_bit(In_sync, &rdev->flags);
++ }
++ }
++ mddev->layout = conf->algorithm;
++ mddev->chunk_sectors = conf->chunk_sectors;
++ mddev->reshape_position = MaxSector;
++ mddev->delta_disks = 0;
++ mddev->reshape_backwards = 0;
++ }
++}
++
++static void raid5_quiesce(struct mddev *mddev, int state)
++{
++ struct r5conf *conf = mddev->private;
++
++ switch(state) {
++ case 2: /* resume for a suspend */
++ wake_up(&conf->wait_for_overlap);
++ break;
++
++ case 1: /* stop all writes */
++ lock_all_device_hash_locks_irq(conf);
++ /* '2' tells resync/reshape to pause so that all
++ * active stripes can drain
++ */
++ conf->quiesce = 2;
++ wait_event_cmd(conf->wait_for_stripe,
++ atomic_read(&conf->active_stripes) == 0 &&
++ atomic_read(&conf->active_aligned_reads) == 0,
++ unlock_all_device_hash_locks_irq(conf),
++ lock_all_device_hash_locks_irq(conf));
++ conf->quiesce = 1;
++ unlock_all_device_hash_locks_irq(conf);
++ /* allow reshape to continue */
++ wake_up(&conf->wait_for_overlap);
++ break;
++
++ case 0: /* re-enable writes */
++ lock_all_device_hash_locks_irq(conf);
++ conf->quiesce = 0;
++ wake_up(&conf->wait_for_stripe);
++ wake_up(&conf->wait_for_overlap);
++ unlock_all_device_hash_locks_irq(conf);
++ break;
++ }
++}
++
++static void *raid45_takeover_raid0(struct mddev *mddev, int level)
++{
++ struct r0conf *raid0_conf = mddev->private;
++ sector_t sectors;
++
++ /* for raid0 takeover only one zone is supported */
++ if (raid0_conf->nr_strip_zones > 1) {
++ printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
++ mdname(mddev));
++ return ERR_PTR(-EINVAL);
++ }
++
++ sectors = raid0_conf->strip_zone[0].zone_end;
++ sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
++ mddev->dev_sectors = sectors;
++ mddev->new_level = level;
++ mddev->new_layout = ALGORITHM_PARITY_N;
++ mddev->new_chunk_sectors = mddev->chunk_sectors;
++ mddev->raid_disks += 1;
++ mddev->delta_disks = 1;
++	/* make sure it will not be marked as dirty */
++ mddev->recovery_cp = MaxSector;
++
++ return setup_conf(mddev);
++}
++
++static void *raid5_takeover_raid1(struct mddev *mddev)
++{
++ int chunksect;
++
++ if (mddev->raid_disks != 2 ||
++ mddev->degraded > 1)
++ return ERR_PTR(-EINVAL);
++
++ /* Should check if there are write-behind devices? */
++
++ chunksect = 64*2; /* 64K by default */
++
++ /* The array must be an exact multiple of chunksize */
++ while (chunksect && (mddev->array_sectors & (chunksect-1)))
++ chunksect >>= 1;
++
++ if ((chunksect<<9) < STRIPE_SIZE)
++ /* array size does not allow a suitable chunk size */
++ return ERR_PTR(-EINVAL);
++
++ mddev->new_level = 5;
++ mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
++ mddev->new_chunk_sectors = chunksect;
++
++ return setup_conf(mddev);
++}
++
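The halving loop searches downward from 64 KiB for the largest power-of-two chunk that exactly divides the array; traced in plain C with an assumed array size:

#include <stdio.h>

int main(void)
{
	unsigned long long array_sectors = 1000000ULL;	/* assumed raid1 size */
	int chunksect = 64 * 2;				/* 64 KiB in sectors */

	/* 128 leaves remainder 64, so one halving step lands on 64 */
	while (chunksect && (array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	/* prints 64 sectors = 32 KiB, comfortably above one 4 KiB page */
	printf("chunk = %d sectors (%d KiB)\n", chunksect, chunksect / 2);
	return 0;
}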
++static void *raid5_takeover_raid6(struct mddev *mddev)
++{
++ int new_layout;
++
++ switch (mddev->layout) {
++ case ALGORITHM_LEFT_ASYMMETRIC_6:
++ new_layout = ALGORITHM_LEFT_ASYMMETRIC;
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC_6:
++ new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC_6:
++ new_layout = ALGORITHM_LEFT_SYMMETRIC;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC_6:
++ new_layout = ALGORITHM_RIGHT_SYMMETRIC;
++ break;
++ case ALGORITHM_PARITY_0_6:
++ new_layout = ALGORITHM_PARITY_0;
++ break;
++ case ALGORITHM_PARITY_N:
++ new_layout = ALGORITHM_PARITY_N;
++ break;
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++ mddev->new_level = 5;
++ mddev->new_layout = new_layout;
++ mddev->delta_disks = -1;
++ mddev->raid_disks -= 1;
++ return setup_conf(mddev);
++}
++
++static int raid5_check_reshape(struct mddev *mddev)
++{
++ /* For a 2-drive array, the layout and chunk size can be changed
++ * immediately as no restriping is needed.
++ * For larger arrays we record the new value - after validation
++ * to be used by a reshape pass.
++ */
++ struct r5conf *conf = mddev->private;
++ int new_chunk = mddev->new_chunk_sectors;
++
++ if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
++ return -EINVAL;
++ if (new_chunk > 0) {
++ if (!is_power_of_2(new_chunk))
++ return -EINVAL;
++ if (new_chunk < (PAGE_SIZE>>9))
++ return -EINVAL;
++ if (mddev->array_sectors & (new_chunk-1))
++ /* not a factor of the array size */
++ return -EINVAL;
++ }
++
++ /* They look valid */
++
++ if (mddev->raid_disks == 2) {
++ /* can make the change immediately */
++ if (mddev->new_layout >= 0) {
++ conf->algorithm = mddev->new_layout;
++ mddev->layout = mddev->new_layout;
++ }
++ if (new_chunk > 0) {
++ conf->chunk_sectors = new_chunk;
++ mddev->chunk_sectors = new_chunk;
++ }
++ set_bit(MD_CHANGE_DEVS, &mddev->flags);
++ md_wakeup_thread(mddev->thread);
++ }
++ return check_reshape(mddev);
++}
++
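The chunk validation reduces to three independent tests; a standalone restatement under the assumption PAGE_SIZE == 4096 (so the minimum chunk is 4096 >> 9 = 8 sectors):

#include <stdbool.h>

static bool chunk_ok(unsigned long long array_sectors, int new_chunk)
{
	if (new_chunk & (new_chunk - 1))	/* not a power of two */
		return false;
	if (new_chunk < (4096 >> 9))		/* smaller than one page */
		return false;
	if (array_sectors & (new_chunk - 1))	/* doesn't divide the array */
		return false;
	return true;
}

For example, chunk_ok(n, 96) fails the power-of-two test for any n, while 8 is the smallest value that can pass.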
++static int raid6_check_reshape(struct mddev *mddev)
++{
++ int new_chunk = mddev->new_chunk_sectors;
++
++ if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
++ return -EINVAL;
++ if (new_chunk > 0) {
++ if (!is_power_of_2(new_chunk))
++ return -EINVAL;
++ if (new_chunk < (PAGE_SIZE >> 9))
++ return -EINVAL;
++ if (mddev->array_sectors & (new_chunk-1))
++ /* not a factor of the array size */
++ return -EINVAL;
++ }
++
++ /* They look valid */
++ return check_reshape(mddev);
++}
++
++static void *raid5_takeover(struct mddev *mddev)
++{
++ /* raid5 can take over:
++ * raid0 - if there is only one strip zone - make it a raid4 layout
++ * raid1 - if there are two drives. We need to know the chunk size
++ * raid4 - trivial - just use a raid4 layout.
++ * raid6 - provided it is a *_6 layout
++ */
++ if (mddev->level == 0)
++ return raid45_takeover_raid0(mddev, 5);
++ if (mddev->level == 1)
++ return raid5_takeover_raid1(mddev);
++ if (mddev->level == 4) {
++ mddev->new_layout = ALGORITHM_PARITY_N;
++ mddev->new_level = 5;
++ return setup_conf(mddev);
++ }
++ if (mddev->level == 6)
++ return raid5_takeover_raid6(mddev);
++
++ return ERR_PTR(-EINVAL);
++}
++
++static void *raid4_takeover(struct mddev *mddev)
++{
++ /* raid4 can take over:
++ * raid0 - if there is only one strip zone
++ * raid5 - if layout is right
++ */
++ if (mddev->level == 0)
++ return raid45_takeover_raid0(mddev, 4);
++ if (mddev->level == 5 &&
++ mddev->layout == ALGORITHM_PARITY_N) {
++ mddev->new_layout = 0;
++ mddev->new_level = 4;
++ return setup_conf(mddev);
++ }
++ return ERR_PTR(-EINVAL);
++}
++
++static struct md_personality raid5_personality;
++
++static void *raid6_takeover(struct mddev *mddev)
++{
++ /* Currently raid6 can only take over a raid5. We map the
++ * personality to an equivalent raid6 personality
++ * with the Q block at the end.
++ */
++ int new_layout;
++
++ if (mddev->pers != &raid5_personality)
++ return ERR_PTR(-EINVAL);
++ if (mddev->degraded > 1)
++ return ERR_PTR(-EINVAL);
++ if (mddev->raid_disks > 253)
++ return ERR_PTR(-EINVAL);
++ if (mddev->raid_disks < 3)
++ return ERR_PTR(-EINVAL);
++
++ switch (mddev->layout) {
++ case ALGORITHM_LEFT_ASYMMETRIC:
++ new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
++ break;
++ case ALGORITHM_RIGHT_ASYMMETRIC:
++ new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
++ break;
++ case ALGORITHM_LEFT_SYMMETRIC:
++ new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
++ break;
++ case ALGORITHM_RIGHT_SYMMETRIC:
++ new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
++ break;
++ case ALGORITHM_PARITY_0:
++ new_layout = ALGORITHM_PARITY_0_6;
++ break;
++ case ALGORITHM_PARITY_N:
++ new_layout = ALGORITHM_PARITY_N;
++ break;
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++ mddev->new_level = 6;
++ mddev->new_layout = new_layout;
++ mddev->delta_disks = 1;
++ mddev->raid_disks += 1;
++ return setup_conf(mddev);
++}
++
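A worked example of the layout mapping, with an assumed starting array:

/* Assumed: 4-disk raid5, layout ALGORITHM_LEFT_SYMMETRIC.
 *
 *	new_level  = 6
 *	new_layout = ALGORITHM_LEFT_SYMMETRIC_6  (same rotation, Q at end)
 *	raid_disks = 5                           (delta_disks = 1)
 *
 * The takeover itself moves no data: the *_6 layouts keep the raid5
 * rotation and park every Q block on the newly added last disk.
 */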
++static struct md_personality raid6_personality =
++{
++ .name = "raid6",
++ .level = 6,
++ .owner = THIS_MODULE,
++ .make_request = make_request,
++ .run = run,
++ .stop = stop,
++ .status = status,
++ .error_handler = error,
++ .hot_add_disk = raid5_add_disk,
++ .hot_remove_disk= raid5_remove_disk,
++ .spare_active = raid5_spare_active,
++ .sync_request = sync_request,
++ .resize = raid5_resize,
++ .size = raid5_size,
++ .check_reshape = raid6_check_reshape,
++ .start_reshape = raid5_start_reshape,
++ .finish_reshape = raid5_finish_reshape,
++ .quiesce = raid5_quiesce,
++ .takeover = raid6_takeover,
++};
++static struct md_personality raid5_personality =
++{
++ .name = "raid5",
++ .level = 5,
++ .owner = THIS_MODULE,
++ .make_request = make_request,
++ .run = run,
++ .stop = stop,
++ .status = status,
++ .error_handler = error,
++ .hot_add_disk = raid5_add_disk,
++ .hot_remove_disk= raid5_remove_disk,
++ .spare_active = raid5_spare_active,
++ .sync_request = sync_request,
++ .resize = raid5_resize,
++ .size = raid5_size,
++ .check_reshape = raid5_check_reshape,
++ .start_reshape = raid5_start_reshape,
++ .finish_reshape = raid5_finish_reshape,
++ .quiesce = raid5_quiesce,
++ .takeover = raid5_takeover,
++};
++
++static struct md_personality raid4_personality =
++{
++ .name = "raid4",
++ .level = 4,
++ .owner = THIS_MODULE,
++ .make_request = make_request,
++ .run = run,
++ .stop = stop,
++ .status = status,
++ .error_handler = error,
++ .hot_add_disk = raid5_add_disk,
++ .hot_remove_disk= raid5_remove_disk,
++ .spare_active = raid5_spare_active,
++ .sync_request = sync_request,
++ .resize = raid5_resize,
++ .size = raid5_size,
++ .check_reshape = raid5_check_reshape,
++ .start_reshape = raid5_start_reshape,
++ .finish_reshape = raid5_finish_reshape,
++ .quiesce = raid5_quiesce,
++ .takeover = raid4_takeover,
++};
++
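For orientation, the md core drives arrays exclusively through these ops tables; a simplified sketch of the indirection (call sites assumed for illustration):

/* The personality is selected by level at run/takeover time, then every
 * array operation is an indirect call through mddev->pers, e.g.:
 *
 *	mddev->pers->make_request(mddev, bio);	 - I/O submission
 *	mddev->pers->quiesce(mddev, 1);		 - suspend / resume
 *	mddev->pers->check_reshape(mddev);	 - geometry changes
 */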
++static int __init raid5_init(void)
++{
++ raid5_wq = alloc_workqueue("raid5wq",
++ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
++ if (!raid5_wq)
++ return -ENOMEM;
++ register_md_personality(&raid6_personality);
++ register_md_personality(&raid5_personality);
++ register_md_personality(&raid4_personality);
++ return 0;
++}
++
++static void raid5_exit(void)
++{
++ unregister_md_personality(&raid6_personality);
++ unregister_md_personality(&raid5_personality);
++ unregister_md_personality(&raid4_personality);
++ destroy_workqueue(raid5_wq);
++}
++
++module_init(raid5_init);
++module_exit(raid5_exit);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
++MODULE_ALIAS("md-personality-4"); /* RAID5 */
++MODULE_ALIAS("md-raid5");
++MODULE_ALIAS("md-raid4");
++MODULE_ALIAS("md-level-5");
++MODULE_ALIAS("md-level-4");
++MODULE_ALIAS("md-personality-8"); /* RAID6 */
++MODULE_ALIAS("md-raid6");
++MODULE_ALIAS("md-level-6");
++
++/* This used to be two separate modules; they were: */
++MODULE_ALIAS("raid5");
++MODULE_ALIAS("raid6");
+diff -Nur linux-3.18.9.orig/drivers/md/raid5.h linux-3.18.9/drivers/md/raid5.h
+--- linux-3.18.9.orig/drivers/md/raid5.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/md/raid5.h 2015-03-15 16:03:03.724094875 -0500
@@ -457,6 +457,7 @@
int recovery_disabled;
/* per cpu variables */
@@ -4624,9 +19600,9 @@ diff -Nur linux-3.18.8.orig/drivers/md/raid5.h linux-3.18.8/drivers/md/raid5.h
struct page *spare_page; /* Used when checking P/Q in raid6 */
void *scribble; /* space for constructing buffer
* lists and performing address
-diff -Nur linux-3.18.8.orig/drivers/misc/hwlat_detector.c linux-3.18.8/drivers/misc/hwlat_detector.c
---- linux-3.18.8.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/drivers/misc/hwlat_detector.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/misc/hwlat_detector.c linux-3.18.9/drivers/misc/hwlat_detector.c
+--- linux-3.18.9.orig/drivers/misc/hwlat_detector.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/drivers/misc/hwlat_detector.c 2015-03-15 16:03:03.724094875 -0500
@@ -0,0 +1,1240 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
@@ -5868,9 +20844,9 @@ diff -Nur linux-3.18.8.orig/drivers/misc/hwlat_detector.c linux-3.18.8/drivers/m
+
+module_init(detector_init);
+module_exit(detector_exit);
-diff -Nur linux-3.18.8.orig/drivers/misc/Kconfig linux-3.18.8/drivers/misc/Kconfig
---- linux-3.18.8.orig/drivers/misc/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/misc/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/misc/Kconfig linux-3.18.9/drivers/misc/Kconfig
+--- linux-3.18.9.orig/drivers/misc/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/misc/Kconfig 2015-03-15 16:03:03.724094875 -0500
@@ -54,6 +54,7 @@
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
@@ -5941,9 +20917,9 @@ diff -Nur linux-3.18.8.orig/drivers/misc/Kconfig linux-3.18.8/drivers/misc/Kconf
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
-diff -Nur linux-3.18.8.orig/drivers/misc/Makefile linux-3.18.8/drivers/misc/Makefile
---- linux-3.18.8.orig/drivers/misc/Makefile 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/misc/Makefile 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/misc/Makefile linux-3.18.9/drivers/misc/Makefile
+--- linux-3.18.9.orig/drivers/misc/Makefile 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/misc/Makefile 2015-03-15 16:03:03.724094875 -0500
@@ -38,6 +38,7 @@
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
@@ -5952,9 +20928,9 @@ diff -Nur linux-3.18.8.orig/drivers/misc/Makefile linux-3.18.8/drivers/misc/Make
obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-diff -Nur linux-3.18.8.orig/drivers/mmc/host/mmci.c linux-3.18.8/drivers/mmc/host/mmci.c
---- linux-3.18.8.orig/drivers/mmc/host/mmci.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/mmc/host/mmci.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/mmc/host/mmci.c linux-3.18.9/drivers/mmc/host/mmci.c
+--- linux-3.18.9.orig/drivers/mmc/host/mmci.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/mmc/host/mmci.c 2015-03-15 16:03:03.732094876 -0500
@@ -1153,15 +1153,12 @@
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
@@ -5980,9 +20956,9 @@ diff -Nur linux-3.18.8.orig/drivers/mmc/host/mmci.c linux-3.18.8/drivers/mmc/hos
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.8/drivers/net/ethernet/3com/3c59x.c
---- linux-3.18.8.orig/drivers/net/ethernet/3com/3c59x.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/3com/3c59x.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.9/drivers/net/ethernet/3com/3c59x.c
+--- linux-3.18.9.orig/drivers/net/ethernet/3com/3c59x.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/3com/3c59x.c 2015-03-15 16:03:03.732094876 -0500
@@ -842,9 +842,9 @@
{
struct vortex_private *vp = netdev_priv(dev);
@@ -6010,9 +20986,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.8/drive
}
}
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.8/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
---- linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.9/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+--- linux-3.18.9.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-03-15 16:03:03.732094876 -0500
@@ -2213,11 +2213,7 @@
}
@@ -6026,9 +21002,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linu
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.8/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
---- linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.9/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+--- linux-3.18.9.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-03-15 16:03:03.732094876 -0500
@@ -1880,8 +1880,7 @@
return NETDEV_TX_OK;
}
@@ -6039,9 +21015,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linu
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.8/drivers/net/ethernet/chelsio/cxgb/sge.c
---- linux-3.18.8.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.9/drivers/net/ethernet/chelsio/cxgb/sge.c
+--- linux-3.18.9.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-03-15 16:03:03.732094876 -0500
@@ -1663,8 +1663,7 @@
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
@@ -6052,9 +21028,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.8
reclaim_completed_tx(sge, q);
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.8/drivers/net/ethernet/freescale/gianfar.c
---- linux-3.18.8.orig/drivers/net/ethernet/freescale/gianfar.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/freescale/gianfar.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.9/drivers/net/ethernet/freescale/gianfar.c
+--- linux-3.18.9.orig/drivers/net/ethernet/freescale/gianfar.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/freescale/gianfar.c 2015-03-15 16:03:03.732094876 -0500
@@ -1483,7 +1483,7 @@
if (netif_running(ndev)) {
@@ -6108,9 +21084,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.8/drivers/net/ethernet/neterion/s2io.c
---- linux-3.18.8.orig/drivers/net/ethernet/neterion/s2io.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/neterion/s2io.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.9/drivers/net/ethernet/neterion/s2io.c
+--- linux-3.18.9.orig/drivers/net/ethernet/neterion/s2io.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/neterion/s2io.c 2015-03-15 16:03:03.736094876 -0500
@@ -4084,12 +4084,7 @@
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
@@ -6125,9 +21101,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.8/dr
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.8/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
---- linux-3.18.8.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.9/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+--- linux-3.18.9.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-03-15 16:03:03.736094876 -0500
@@ -2137,10 +2137,8 @@
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
@@ -6141,9 +21117,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.8/drivers/net/ethernet/realtek/8139too.c
---- linux-3.18.8.orig/drivers/net/ethernet/realtek/8139too.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/realtek/8139too.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.9/drivers/net/ethernet/realtek/8139too.c
+--- linux-3.18.9.orig/drivers/net/ethernet/realtek/8139too.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/realtek/8139too.c 2015-03-15 16:03:03.736094876 -0500
@@ -2215,7 +2215,7 @@
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
@@ -6153,9 +21129,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.8/
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
-diff -Nur linux-3.18.8.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.8/drivers/net/ethernet/tehuti/tehuti.c
---- linux-3.18.8.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/ethernet/tehuti/tehuti.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.9/drivers/net/ethernet/tehuti/tehuti.c
+--- linux-3.18.9.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/ethernet/tehuti/tehuti.c 2015-03-15 16:03:03.744094876 -0500
@@ -1629,13 +1629,8 @@
unsigned long flags;
@@ -6172,9 +21148,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.8/dr
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
-diff -Nur linux-3.18.8.orig/drivers/net/rionet.c linux-3.18.8/drivers/net/rionet.c
---- linux-3.18.8.orig/drivers/net/rionet.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/rionet.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/rionet.c linux-3.18.9/drivers/net/rionet.c
+--- linux-3.18.9.orig/drivers/net/rionet.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/rionet.c 2015-03-15 16:03:03.744094876 -0500
@@ -174,11 +174,7 @@
unsigned long flags;
int add_num = 1;
@@ -6188,9 +21164,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/rionet.c linux-3.18.8/drivers/net/rionet
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
-diff -Nur linux-3.18.8.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.8/drivers/net/wireless/orinoco/orinoco_usb.c
---- linux-3.18.8.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/net/wireless/orinoco/orinoco_usb.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.9/drivers/net/wireless/orinoco/orinoco_usb.c
+--- linux-3.18.9.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/net/wireless/orinoco/orinoco_usb.c 2015-03-15 16:03:03.744094876 -0500
@@ -699,7 +699,7 @@
while (!ctx->done.done && msecs--)
udelay(1000);
@@ -6200,9 +21176,9 @@ diff -Nur linux-3.18.8.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.1
ctx->done.done);
}
break;
-diff -Nur linux-3.18.8.orig/drivers/pci/access.c linux-3.18.8/drivers/pci/access.c
---- linux-3.18.8.orig/drivers/pci/access.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/pci/access.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/pci/access.c linux-3.18.9/drivers/pci/access.c
+--- linux-3.18.9.orig/drivers/pci/access.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/pci/access.c 2015-03-15 16:03:03.744094876 -0500
@@ -434,7 +434,7 @@
WARN_ON(!dev->block_cfg_access);
@@ -6212,9 +21188,9 @@ diff -Nur linux-3.18.8.orig/drivers/pci/access.c linux-3.18.8/drivers/pci/access
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
-diff -Nur linux-3.18.8.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.8/drivers/scsi/fcoe/fcoe.c
---- linux-3.18.8.orig/drivers/scsi/fcoe/fcoe.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/scsi/fcoe/fcoe.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.9/drivers/scsi/fcoe/fcoe.c
+--- linux-3.18.9.orig/drivers/scsi/fcoe/fcoe.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/scsi/fcoe/fcoe.c 2015-03-15 16:03:03.744094876 -0500
@@ -1286,7 +1286,7 @@
struct sk_buff *skb;
#ifdef CONFIG_SMP
@@ -6278,9 +21254,9 @@ diff -Nur linux-3.18.8.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.8/drivers/scsi/f
kfree_skb(skb);
}
-diff -Nur linux-3.18.8.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.8/drivers/scsi/fcoe/fcoe_ctlr.c
---- linux-3.18.8.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/scsi/fcoe/fcoe_ctlr.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.9/drivers/scsi/fcoe/fcoe_ctlr.c
+--- linux-3.18.9.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/scsi/fcoe/fcoe_ctlr.c 2015-03-15 16:03:03.744094876 -0500
@@ -831,7 +831,7 @@
INIT_LIST_HEAD(&del_list);
@@ -6299,9 +21275,9 @@ diff -Nur linux-3.18.8.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.8/drivers/s
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
-diff -Nur linux-3.18.8.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.8/drivers/scsi/libfc/fc_exch.c
---- linux-3.18.8.orig/drivers/scsi/libfc/fc_exch.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/scsi/libfc/fc_exch.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.9/drivers/scsi/libfc/fc_exch.c
+--- linux-3.18.9.orig/drivers/scsi/libfc/fc_exch.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/scsi/libfc/fc_exch.c 2015-03-15 16:03:03.744094876 -0500
@@ -816,10 +816,10 @@
}
memset(ep, 0, sizeof(*ep));
@@ -6315,9 +21291,9 @@ diff -Nur linux-3.18.8.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.8/drivers/sc
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
-diff -Nur linux-3.18.8.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.8/drivers/scsi/libsas/sas_ata.c
---- linux-3.18.8.orig/drivers/scsi/libsas/sas_ata.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/scsi/libsas/sas_ata.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.9/drivers/scsi/libsas/sas_ata.c
+--- linux-3.18.9.orig/drivers/scsi/libsas/sas_ata.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/scsi/libsas/sas_ata.c 2015-03-15 16:03:03.744094876 -0500
@@ -191,7 +191,7 @@
/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
@@ -6336,9 +21312,9 @@ diff -Nur linux-3.18.8.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.8/drivers/s
return ret;
}
-diff -Nur linux-3.18.8.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.8/drivers/scsi/qla2xxx/qla_inline.h
---- linux-3.18.8.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/scsi/qla2xxx/qla_inline.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.9/drivers/scsi/qla2xxx/qla_inline.h
+--- linux-3.18.9.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/scsi/qla2xxx/qla_inline.h 2015-03-15 16:03:03.744094876 -0500
@@ -59,12 +59,12 @@
{
unsigned long flags;
@@ -6354,9 +21330,9 @@ diff -Nur linux-3.18.8.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.8/drive
}
static inline uint8_t *
-diff -Nur linux-3.18.8.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.8/drivers/tty/serial/8250/8250_core.c
---- linux-3.18.8.orig/drivers/tty/serial/8250/8250_core.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/tty/serial/8250/8250_core.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.9/drivers/tty/serial/8250/8250_core.c
+--- linux-3.18.9.orig/drivers/tty/serial/8250/8250_core.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/tty/serial/8250/8250_core.c 2015-03-15 16:03:03.784094876 -0500
@@ -37,6 +37,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
@@ -6392,9 +21368,9 @@ diff -Nur linux-3.18.8.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.8/dri
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
-diff -Nur linux-3.18.8.orig/drivers/tty/serial/amba-pl011.c linux-3.18.8/drivers/tty/serial/amba-pl011.c
---- linux-3.18.8.orig/drivers/tty/serial/amba-pl011.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/tty/serial/amba-pl011.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/tty/serial/amba-pl011.c linux-3.18.9/drivers/tty/serial/amba-pl011.c
+--- linux-3.18.9.orig/drivers/tty/serial/amba-pl011.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/tty/serial/amba-pl011.c 2015-03-15 16:03:03.784094876 -0500
@@ -1935,13 +1935,19 @@
clk_enable(uap->clk);
@@ -6428,9 +21404,9 @@ diff -Nur linux-3.18.8.orig/drivers/tty/serial/amba-pl011.c linux-3.18.8/drivers
clk_disable(uap->clk);
}
-diff -Nur linux-3.18.8.orig/drivers/tty/serial/omap-serial.c linux-3.18.8/drivers/tty/serial/omap-serial.c
---- linux-3.18.8.orig/drivers/tty/serial/omap-serial.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/tty/serial/omap-serial.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/tty/serial/omap-serial.c linux-3.18.9/drivers/tty/serial/omap-serial.c
+--- linux-3.18.9.orig/drivers/tty/serial/omap-serial.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/tty/serial/omap-serial.c 2015-03-15 16:03:03.784094876 -0500
@@ -1270,13 +1270,10 @@
pm_runtime_get_sync(up->dev);
@@ -6458,9 +21434,9 @@ diff -Nur linux-3.18.8.orig/drivers/tty/serial/omap-serial.c linux-3.18.8/driver
}
static int __init
-diff -Nur linux-3.18.8.orig/drivers/usb/core/hcd.c linux-3.18.8/drivers/usb/core/hcd.c
---- linux-3.18.8.orig/drivers/usb/core/hcd.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/usb/core/hcd.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/usb/core/hcd.c linux-3.18.9/drivers/usb/core/hcd.c
+--- linux-3.18.9.orig/drivers/usb/core/hcd.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/usb/core/hcd.c 2015-03-15 16:03:03.784094876 -0500
@@ -1681,9 +1681,9 @@
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
@@ -6473,9 +21449,9 @@ diff -Nur linux-3.18.8.orig/drivers/usb/core/hcd.c linux-3.18.8/drivers/usb/core
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
-diff -Nur linux-3.18.8.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.8/drivers/usb/gadget/function/f_fs.c
---- linux-3.18.8.orig/drivers/usb/gadget/function/f_fs.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/usb/gadget/function/f_fs.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.9/drivers/usb/gadget/function/f_fs.c
+--- linux-3.18.9.orig/drivers/usb/gadget/function/f_fs.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/usb/gadget/function/f_fs.c 2015-03-15 16:03:03.784094876 -0500
@@ -1428,7 +1428,7 @@
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
@@ -6485,9 +21461,9 @@ diff -Nur linux-3.18.8.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.8/driv
kfree(ffs->dev_name);
kfree(ffs);
}
-diff -Nur linux-3.18.8.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.8/drivers/usb/gadget/legacy/inode.c
---- linux-3.18.8.orig/drivers/usb/gadget/legacy/inode.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/drivers/usb/gadget/legacy/inode.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.9/drivers/usb/gadget/legacy/inode.c
+--- linux-3.18.9.orig/drivers/usb/gadget/legacy/inode.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/drivers/usb/gadget/legacy/inode.c 2015-03-15 16:03:03.784094876 -0500
@@ -339,7 +339,7 @@
spin_unlock_irq (&epdata->dev->lock);
@@ -6506,9 +21482,9 @@ diff -Nur linux-3.18.8.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.8/drive
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
-diff -Nur linux-3.18.8.orig/fs/aio.c linux-3.18.8/fs/aio.c
---- linux-3.18.8.orig/fs/aio.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/aio.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/aio.c linux-3.18.9/fs/aio.c
+--- linux-3.18.9.orig/fs/aio.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/aio.c 2015-03-15 16:03:03.788094876 -0500
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
@@ -6584,9 +21560,9 @@ diff -Nur linux-3.18.8.orig/fs/aio.c linux-3.18.8/fs/aio.c
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;
-diff -Nur linux-3.18.8.orig/fs/autofs4/autofs_i.h linux-3.18.8/fs/autofs4/autofs_i.h
---- linux-3.18.8.orig/fs/autofs4/autofs_i.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/autofs4/autofs_i.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/autofs4/autofs_i.h linux-3.18.9/fs/autofs4/autofs_i.h
+--- linux-3.18.9.orig/fs/autofs4/autofs_i.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/autofs4/autofs_i.h 2015-03-15 16:03:03.788094876 -0500
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
@@ -6595,9 +21571,9 @@ diff -Nur linux-3.18.8.orig/fs/autofs4/autofs_i.h linux-3.18.8/fs/autofs4/autofs
#include <asm/current.h>
#include <asm/uaccess.h>
-diff -Nur linux-3.18.8.orig/fs/autofs4/expire.c linux-3.18.8/fs/autofs4/expire.c
---- linux-3.18.8.orig/fs/autofs4/expire.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/autofs4/expire.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/autofs4/expire.c linux-3.18.9/fs/autofs4/expire.c
+--- linux-3.18.9.orig/fs/autofs4/expire.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/autofs4/expire.c 2015-03-15 16:03:03.788094876 -0500
@@ -151,7 +151,7 @@
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
@@ -6607,9 +21583,9 @@ diff -Nur linux-3.18.8.orig/fs/autofs4/expire.c linux-3.18.8/fs/autofs4/expire.c
goto relock;
}
spin_unlock(&p->d_lock);
-diff -Nur linux-3.18.8.orig/fs/buffer.c linux-3.18.8/fs/buffer.c
---- linux-3.18.8.orig/fs/buffer.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/buffer.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/buffer.c linux-3.18.9/fs/buffer.c
+--- linux-3.18.9.orig/fs/buffer.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/buffer.c 2015-03-15 16:03:03.788094876 -0500
@@ -301,8 +301,7 @@
* decide that the page is now completely done.
*/
@@ -6677,9 +21653,9 @@ diff -Nur linux-3.18.8.orig/fs/buffer.c linux-3.18.8/fs/buffer.c
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
-diff -Nur linux-3.18.8.orig/fs/dcache.c linux-3.18.8/fs/dcache.c
---- linux-3.18.8.orig/fs/dcache.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/dcache.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/dcache.c linux-3.18.9/fs/dcache.c
+--- linux-3.18.9.orig/fs/dcache.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/dcache.c 2015-03-15 16:03:03.788094876 -0500
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
@@ -6706,9 +21682,9 @@ diff -Nur linux-3.18.8.orig/fs/dcache.c linux-3.18.8/fs/dcache.c
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-diff -Nur linux-3.18.8.orig/fs/eventpoll.c linux-3.18.8/fs/eventpoll.c
---- linux-3.18.8.orig/fs/eventpoll.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/eventpoll.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/eventpoll.c linux-3.18.9/fs/eventpoll.c
+--- linux-3.18.9.orig/fs/eventpoll.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/eventpoll.c 2015-03-15 16:03:03.788094876 -0500
@@ -505,12 +505,12 @@
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
@@ -6724,9 +21700,9 @@ diff -Nur linux-3.18.8.orig/fs/eventpoll.c linux-3.18.8/fs/eventpoll.c
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
-diff -Nur linux-3.18.8.orig/fs/exec.c linux-3.18.8/fs/exec.c
---- linux-3.18.8.orig/fs/exec.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/exec.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/exec.c linux-3.18.9/fs/exec.c
+--- linux-3.18.9.orig/fs/exec.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/exec.c 2015-03-15 16:03:03.788094876 -0500
@@ -841,12 +841,14 @@
}
}
@@ -6742,9 +21718,9 @@ diff -Nur linux-3.18.8.orig/fs/exec.c linux-3.18.8/fs/exec.c
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
-diff -Nur linux-3.18.8.orig/fs/jbd/checkpoint.c linux-3.18.8/fs/jbd/checkpoint.c
---- linux-3.18.8.orig/fs/jbd/checkpoint.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/jbd/checkpoint.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/jbd/checkpoint.c linux-3.18.9/fs/jbd/checkpoint.c
+--- linux-3.18.9.orig/fs/jbd/checkpoint.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/jbd/checkpoint.c 2015-03-15 16:03:03.788094876 -0500
@@ -129,6 +129,8 @@
if (journal->j_flags & JFS_ABORT)
return;
@@ -6754,9 +21730,9 @@ diff -Nur linux-3.18.8.orig/fs/jbd/checkpoint.c linux-3.18.8/fs/jbd/checkpoint.c
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-3.18.8.orig/fs/jbd2/checkpoint.c linux-3.18.8/fs/jbd2/checkpoint.c
---- linux-3.18.8.orig/fs/jbd2/checkpoint.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/jbd2/checkpoint.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/jbd2/checkpoint.c linux-3.18.9/fs/jbd2/checkpoint.c
+--- linux-3.18.9.orig/fs/jbd2/checkpoint.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/jbd2/checkpoint.c 2015-03-15 16:03:03.788094876 -0500
@@ -116,6 +116,8 @@
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
@@ -6766,9 +21742,9 @@ diff -Nur linux-3.18.8.orig/fs/jbd2/checkpoint.c linux-3.18.8/fs/jbd2/checkpoint
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-3.18.8.orig/fs/namespace.c linux-3.18.8/fs/namespace.c
---- linux-3.18.8.orig/fs/namespace.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/namespace.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/namespace.c linux-3.18.9/fs/namespace.c
+--- linux-3.18.9.orig/fs/namespace.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/namespace.c 2015-03-15 16:03:03.788094876 -0500
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
@@ -6791,9 +21767,9 @@ diff -Nur linux-3.18.8.orig/fs/namespace.c linux-3.18.8/fs/namespace.c
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
-diff -Nur linux-3.18.8.orig/fs/ntfs/aops.c linux-3.18.8/fs/ntfs/aops.c
---- linux-3.18.8.orig/fs/ntfs/aops.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/ntfs/aops.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/ntfs/aops.c linux-3.18.9/fs/ntfs/aops.c
+--- linux-3.18.9.orig/fs/ntfs/aops.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/ntfs/aops.c 2015-03-15 16:03:03.788094876 -0500
@@ -107,8 +107,7 @@
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
@@ -6841,9 +21817,9 @@ diff -Nur linux-3.18.8.orig/fs/ntfs/aops.c linux-3.18.8/fs/ntfs/aops.c
}
/**
-diff -Nur linux-3.18.8.orig/fs/timerfd.c linux-3.18.8/fs/timerfd.c
---- linux-3.18.8.orig/fs/timerfd.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/fs/timerfd.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/fs/timerfd.c linux-3.18.9/fs/timerfd.c
+--- linux-3.18.9.orig/fs/timerfd.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/fs/timerfd.c 2015-03-15 16:03:03.788094876 -0500
@@ -449,7 +449,10 @@
break;
}
@@ -6856,9 +21832,9 @@ diff -Nur linux-3.18.8.orig/fs/timerfd.c linux-3.18.8/fs/timerfd.c
}
/*
-diff -Nur linux-3.18.8.orig/include/acpi/platform/aclinux.h linux-3.18.8/include/acpi/platform/aclinux.h
---- linux-3.18.8.orig/include/acpi/platform/aclinux.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/acpi/platform/aclinux.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/acpi/platform/aclinux.h linux-3.18.9/include/acpi/platform/aclinux.h
+--- linux-3.18.9.orig/include/acpi/platform/aclinux.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/acpi/platform/aclinux.h 2015-03-15 16:03:03.788094876 -0500
@@ -123,6 +123,7 @@
#define acpi_cache_t struct kmem_cache
@@ -6888,9 +21864,9 @@ diff -Nur linux-3.18.8.orig/include/acpi/platform/aclinux.h linux-3.18.8/include
/*
* OSL interfaces used by debugger/disassembler
*/
-diff -Nur linux-3.18.8.orig/include/asm-generic/bug.h linux-3.18.8/include/asm-generic/bug.h
---- linux-3.18.8.orig/include/asm-generic/bug.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/asm-generic/bug.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/asm-generic/bug.h linux-3.18.9/include/asm-generic/bug.h
+--- linux-3.18.9.orig/include/asm-generic/bug.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/asm-generic/bug.h 2015-03-15 16:03:03.792094875 -0500
@@ -206,6 +206,20 @@
# define WARN_ON_SMP(x) ({0;})
#endif
@@ -6912,9 +21888,9 @@ diff -Nur linux-3.18.8.orig/include/asm-generic/bug.h linux-3.18.8/include/asm-g
#endif /* __ASSEMBLY__ */
#endif
-diff -Nur linux-3.18.8.orig/include/linux/blkdev.h linux-3.18.8/include/linux/blkdev.h
---- linux-3.18.8.orig/include/linux/blkdev.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/blkdev.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/blkdev.h linux-3.18.9/include/linux/blkdev.h
+--- linux-3.18.9.orig/include/linux/blkdev.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/blkdev.h 2015-03-15 16:03:03.792094875 -0500
@@ -101,6 +101,7 @@
struct list_head queuelist;
union {
@@ -6932,9 +21908,9 @@ diff -Nur linux-3.18.8.orig/include/linux/blkdev.h linux-3.18.8/include/linux/bl
struct percpu_ref mq_usage_counter;
struct list_head all_q_node;
-diff -Nur linux-3.18.8.orig/include/linux/blk-mq.h linux-3.18.8/include/linux/blk-mq.h
---- linux-3.18.8.orig/include/linux/blk-mq.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/blk-mq.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/blk-mq.h linux-3.18.9/include/linux/blk-mq.h
+--- linux-3.18.9.orig/include/linux/blk-mq.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/blk-mq.h 2015-03-15 16:03:03.792094875 -0500
@@ -169,6 +169,7 @@
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
@@ -6943,9 +21919,9 @@ diff -Nur linux-3.18.8.orig/include/linux/blk-mq.h linux-3.18.8/include/linux/bl
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
-diff -Nur linux-3.18.8.orig/include/linux/bottom_half.h linux-3.18.8/include/linux/bottom_half.h
---- linux-3.18.8.orig/include/linux/bottom_half.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/bottom_half.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/bottom_half.h linux-3.18.9/include/linux/bottom_half.h
+--- linux-3.18.9.orig/include/linux/bottom_half.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/bottom_half.h 2015-03-15 16:03:03.792094875 -0500
@@ -4,6 +4,17 @@
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
@@ -6971,9 +21947,9 @@ diff -Nur linux-3.18.8.orig/include/linux/bottom_half.h linux-3.18.8/include/lin
+#endif
#endif /* _LINUX_BH_H */
-diff -Nur linux-3.18.8.orig/include/linux/buffer_head.h linux-3.18.8/include/linux/buffer_head.h
---- linux-3.18.8.orig/include/linux/buffer_head.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/buffer_head.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/buffer_head.h linux-3.18.9/include/linux/buffer_head.h
+--- linux-3.18.9.orig/include/linux/buffer_head.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/buffer_head.h 2015-03-15 16:03:03.792094875 -0500
@@ -75,8 +75,52 @@
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
@@ -7027,9 +22003,9 @@ diff -Nur linux-3.18.8.orig/include/linux/buffer_head.h linux-3.18.8/include/lin
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
-diff -Nur linux-3.18.8.orig/include/linux/cgroup.h linux-3.18.8/include/linux/cgroup.h
---- linux-3.18.8.orig/include/linux/cgroup.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/cgroup.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/cgroup.h linux-3.18.9/include/linux/cgroup.h
+--- linux-3.18.9.orig/include/linux/cgroup.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/cgroup.h 2015-03-15 16:03:03.792094875 -0500
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
@@ -7046,9 +22022,9 @@ diff -Nur linux-3.18.8.orig/include/linux/cgroup.h linux-3.18.8/include/linux/cg
};
/* bits in struct cgroup_subsys_state flags field */
-diff -Nur linux-3.18.8.orig/include/linux/completion.h linux-3.18.8/include/linux/completion.h
---- linux-3.18.8.orig/include/linux/completion.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/completion.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/completion.h linux-3.18.9/include/linux/completion.h
+--- linux-3.18.9.orig/include/linux/completion.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/completion.h 2015-03-15 16:03:03.792094875 -0500
@@ -7,8 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
@@ -7082,9 +22058,9 @@ diff -Nur linux-3.18.8.orig/include/linux/completion.h linux-3.18.8/include/linu
}
/**
-diff -Nur linux-3.18.8.orig/include/linux/cpu.h linux-3.18.8/include/linux/cpu.h
---- linux-3.18.8.orig/include/linux/cpu.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/cpu.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/cpu.h linux-3.18.9/include/linux/cpu.h
+--- linux-3.18.9.orig/include/linux/cpu.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/cpu.h 2015-03-15 16:03:03.792094875 -0500
@@ -217,6 +217,8 @@
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
@@ -7103,9 +22079,9 @@ diff -Nur linux-3.18.8.orig/include/linux/cpu.h linux-3.18.8/include/linux/cpu.h
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
-diff -Nur linux-3.18.8.orig/include/linux/delay.h linux-3.18.8/include/linux/delay.h
---- linux-3.18.8.orig/include/linux/delay.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/delay.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/delay.h linux-3.18.9/include/linux/delay.h
+--- linux-3.18.9.orig/include/linux/delay.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/delay.h 2015-03-15 16:03:03.792094875 -0500
@@ -52,4 +52,10 @@
msleep(seconds * 1000);
}
@@ -7117,9 +22093,9 @@ diff -Nur linux-3.18.8.orig/include/linux/delay.h linux-3.18.8/include/linux/del
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
-diff -Nur linux-3.18.8.orig/include/linux/ftrace_event.h linux-3.18.8/include/linux/ftrace_event.h
---- linux-3.18.8.orig/include/linux/ftrace_event.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/ftrace_event.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/ftrace_event.h linux-3.18.9/include/linux/ftrace_event.h
+--- linux-3.18.9.orig/include/linux/ftrace_event.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/ftrace_event.h 2015-03-15 16:03:03.804094874 -0500
@@ -61,6 +61,9 @@
unsigned char flags;
unsigned char preempt_count;
@@ -7130,9 +22106,9 @@ diff -Nur linux-3.18.8.orig/include/linux/ftrace_event.h linux-3.18.8/include/li
};
#define FTRACE_MAX_EVENT \
-diff -Nur linux-3.18.8.orig/include/linux/highmem.h linux-3.18.8/include/linux/highmem.h
---- linux-3.18.8.orig/include/linux/highmem.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/highmem.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/highmem.h linux-3.18.9/include/linux/highmem.h
+--- linux-3.18.9.orig/include/linux/highmem.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/highmem.h 2015-03-15 16:03:03.804094874 -0500
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -7197,9 +22173,9 @@ diff -Nur linux-3.18.8.orig/include/linux/highmem.h linux-3.18.8/include/linux/h
#endif
}
-diff -Nur linux-3.18.8.orig/include/linux/hrtimer.h linux-3.18.8/include/linux/hrtimer.h
---- linux-3.18.8.orig/include/linux/hrtimer.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/hrtimer.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/hrtimer.h linux-3.18.9/include/linux/hrtimer.h
+--- linux-3.18.9.orig/include/linux/hrtimer.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/hrtimer.h 2015-03-15 16:03:03.804094874 -0500
@@ -111,6 +111,11 @@
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
@@ -7244,9 +22220,9 @@ diff -Nur linux-3.18.8.orig/include/linux/hrtimer.h linux-3.18.8/include/linux/h
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-diff -Nur linux-3.18.8.orig/include/linux/idr.h linux-3.18.8/include/linux/idr.h
---- linux-3.18.8.orig/include/linux/idr.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/idr.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/idr.h linux-3.18.9/include/linux/idr.h
+--- linux-3.18.9.orig/include/linux/idr.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/idr.h 2015-03-15 16:03:03.804094874 -0500
@@ -95,10 +95,14 @@
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
@@ -7262,9 +22238,9 @@ diff -Nur linux-3.18.8.orig/include/linux/idr.h linux-3.18.8/include/linux/idr.h
/**
* idr_find - return pointer for given id
-diff -Nur linux-3.18.8.orig/include/linux/init_task.h linux-3.18.8/include/linux/init_task.h
---- linux-3.18.8.orig/include/linux/init_task.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/init_task.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/init_task.h linux-3.18.9/include/linux/init_task.h
+--- linux-3.18.9.orig/include/linux/init_task.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/init_task.h 2015-03-15 16:03:03.804094874 -0500
@@ -147,9 +147,16 @@
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -7291,9 +22267,9 @@ diff -Nur linux-3.18.8.orig/include/linux/init_task.h linux-3.18.8/include/linux
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-diff -Nur linux-3.18.8.orig/include/linux/interrupt.h linux-3.18.8/include/linux/interrupt.h
---- linux-3.18.8.orig/include/linux/interrupt.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/interrupt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/interrupt.h linux-3.18.9/include/linux/interrupt.h
+--- linux-3.18.9.orig/include/linux/interrupt.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/interrupt.h 2015-03-15 16:03:03.804094874 -0500
@@ -57,6 +57,7 @@
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -7460,9 +22436,9 @@ diff -Nur linux-3.18.8.orig/include/linux/interrupt.h linux-3.18.8/include/linux
/*
* Autoprobing for irqs:
*
-diff -Nur linux-3.18.8.orig/include/linux/irqdesc.h linux-3.18.8/include/linux/irqdesc.h
---- linux-3.18.8.orig/include/linux/irqdesc.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/irqdesc.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/irqdesc.h linux-3.18.9/include/linux/irqdesc.h
+--- linux-3.18.9.orig/include/linux/irqdesc.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/irqdesc.h 2015-03-15 16:03:03.804094874 -0500
@@ -63,6 +63,7 @@
unsigned int irqs_unhandled;
atomic_t threads_handled;
@@ -7471,9 +22447,9 @@ diff -Nur linux-3.18.8.orig/include/linux/irqdesc.h linux-3.18.8/include/linux/i
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
-diff -Nur linux-3.18.8.orig/include/linux/irqflags.h linux-3.18.8/include/linux/irqflags.h
---- linux-3.18.8.orig/include/linux/irqflags.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/irqflags.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/irqflags.h linux-3.18.9/include/linux/irqflags.h
+--- linux-3.18.9.orig/include/linux/irqflags.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/irqflags.h 2015-03-15 16:03:03.804094874 -0500
@@ -25,8 +25,6 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
@@ -7524,9 +22500,9 @@ diff -Nur linux-3.18.8.orig/include/linux/irqflags.h linux-3.18.8/include/linux/
+#endif
+
#endif
-diff -Nur linux-3.18.8.orig/include/linux/irq.h linux-3.18.8/include/linux/irq.h
---- linux-3.18.8.orig/include/linux/irq.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/irq.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/irq.h linux-3.18.9/include/linux/irq.h
+--- linux-3.18.9.orig/include/linux/irq.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/irq.h 2015-03-15 16:03:03.804094874 -0500
@@ -73,6 +73,7 @@
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
@@ -7551,9 +22527,9 @@ diff -Nur linux-3.18.8.orig/include/linux/irq.h linux-3.18.8/include/linux/irq.h
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-diff -Nur linux-3.18.8.orig/include/linux/irq_work.h linux-3.18.8/include/linux/irq_work.h
---- linux-3.18.8.orig/include/linux/irq_work.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/irq_work.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/irq_work.h linux-3.18.9/include/linux/irq_work.h
+--- linux-3.18.9.orig/include/linux/irq_work.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/irq_work.h 2015-03-15 16:03:03.804094874 -0500
@@ -16,6 +16,7 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
@@ -7562,9 +22538,9 @@ diff -Nur linux-3.18.8.orig/include/linux/irq_work.h linux-3.18.8/include/linux/
struct irq_work {
unsigned long flags;
-diff -Nur linux-3.18.8.orig/include/linux/jbd_common.h linux-3.18.8/include/linux/jbd_common.h
---- linux-3.18.8.orig/include/linux/jbd_common.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/jbd_common.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/jbd_common.h linux-3.18.9/include/linux/jbd_common.h
+--- linux-3.18.9.orig/include/linux/jbd_common.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/jbd_common.h 2015-03-15 16:03:03.804094874 -0500
@@ -15,32 +15,56 @@
static inline void jbd_lock_bh_state(struct buffer_head *bh)
@@ -7622,9 +22598,9 @@ diff -Nur linux-3.18.8.orig/include/linux/jbd_common.h linux-3.18.8/include/linu
}
#endif
-diff -Nur linux-3.18.8.orig/include/linux/jump_label.h linux-3.18.8/include/linux/jump_label.h
---- linux-3.18.8.orig/include/linux/jump_label.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/jump_label.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/jump_label.h linux-3.18.9/include/linux/jump_label.h
+--- linux-3.18.9.orig/include/linux/jump_label.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/jump_label.h 2015-03-15 16:03:03.804094874 -0500
@@ -55,7 +55,8 @@
"%s used before call to jump_label_init", \
__func__)
@@ -7635,9 +22611,9 @@ diff -Nur linux-3.18.8.orig/include/linux/jump_label.h linux-3.18.8/include/linu
struct static_key {
atomic_t enabled;
-diff -Nur linux-3.18.8.orig/include/linux/kdb.h linux-3.18.8/include/linux/kdb.h
---- linux-3.18.8.orig/include/linux/kdb.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/kdb.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/kdb.h linux-3.18.9/include/linux/kdb.h
+--- linux-3.18.9.orig/include/linux/kdb.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/kdb.h 2015-03-15 16:03:03.804094874 -0500
@@ -116,7 +116,7 @@
extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
@@ -7655,9 +22631,9 @@ diff -Nur linux-3.18.8.orig/include/linux/kdb.h linux-3.18.8/include/linux/kdb.h
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-diff -Nur linux-3.18.8.orig/include/linux/kernel.h linux-3.18.8/include/linux/kernel.h
---- linux-3.18.8.orig/include/linux/kernel.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/kernel.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/kernel.h linux-3.18.9/include/linux/kernel.h
+--- linux-3.18.9.orig/include/linux/kernel.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/kernel.h 2015-03-15 16:03:03.804094874 -0500
@@ -451,6 +451,7 @@
SYSTEM_HALT,
SYSTEM_POWER_OFF,
@@ -7666,9 +22642,9 @@ diff -Nur linux-3.18.8.orig/include/linux/kernel.h linux-3.18.8/include/linux/ke
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
-diff -Nur linux-3.18.8.orig/include/linux/lglock.h linux-3.18.8/include/linux/lglock.h
---- linux-3.18.8.orig/include/linux/lglock.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/lglock.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/lglock.h linux-3.18.9/include/linux/lglock.h
+--- linux-3.18.9.orig/include/linux/lglock.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/lglock.h 2015-03-15 16:03:03.804094874 -0500
@@ -34,22 +34,39 @@
#endif
@@ -7724,9 +22700,9 @@ diff -Nur linux-3.18.8.orig/include/linux/lglock.h linux-3.18.8/include/linux/lg
#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
-diff -Nur linux-3.18.8.orig/include/linux/list_bl.h linux-3.18.8/include/linux/list_bl.h
---- linux-3.18.8.orig/include/linux/list_bl.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/list_bl.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/list_bl.h linux-3.18.9/include/linux/list_bl.h
+--- linux-3.18.9.orig/include/linux/list_bl.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/list_bl.h 2015-03-15 16:03:03.808094874 -0500
@@ -2,6 +2,7 @@
#define _LINUX_LIST_BL_H
@@ -7787,9 +22763,9 @@ diff -Nur linux-3.18.8.orig/include/linux/list_bl.h linux-3.18.8/include/linux/l
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-diff -Nur linux-3.18.8.orig/include/linux/locallock.h linux-3.18.8/include/linux/locallock.h
---- linux-3.18.8.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/locallock.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/locallock.h linux-3.18.9/include/linux/locallock.h
+--- linux-3.18.9.orig/include/linux/locallock.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/locallock.h 2015-03-15 16:03:03.808094874 -0500
@@ -0,0 +1,270 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
@@ -8061,9 +23037,9 @@ diff -Nur linux-3.18.8.orig/include/linux/locallock.h linux-3.18.8/include/linux
+#endif
+
+#endif
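
The new locallock.h above introduces named per-CPU locks that replace open-coded preempt_disable()/local_irq_save() sections protecting per-CPU data: on non-RT configurations they compile down to the old interrupt/preemption toggles, on RT they take a per-CPU sleeping spinlock so the section stays preemptible. A usage sketch, assuming the DEFINE_LOCAL_IRQ_LOCK()/local_lock_irqsave() interface from the elided body of the header; dummy_lock and dummy_count are illustrative names only:

static DEFINE_LOCAL_IRQ_LOCK(dummy_lock);
static DEFINE_PER_CPU(unsigned long, dummy_count);

static void dummy_account(unsigned long n)
{
        unsigned long flags;

        local_lock_irqsave(dummy_lock, flags);  /* irqs off on !RT, per-CPU rt lock on RT */
        __this_cpu_add(dummy_count, n);
        local_unlock_irqrestore(dummy_lock, flags);
}
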
-diff -Nur linux-3.18.8.orig/include/linux/mm_types.h linux-3.18.8/include/linux/mm_types.h
---- linux-3.18.8.orig/include/linux/mm_types.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/mm_types.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/mm_types.h linux-3.18.9/include/linux/mm_types.h
+--- linux-3.18.9.orig/include/linux/mm_types.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/mm_types.h 2015-03-15 16:03:03.808094874 -0500
@@ -11,6 +11,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -8082,9 +23058,9 @@ diff -Nur linux-3.18.8.orig/include/linux/mm_types.h linux-3.18.8/include/linux/
};
static inline void mm_init_cpumask(struct mm_struct *mm)
-diff -Nur linux-3.18.8.orig/include/linux/mutex.h linux-3.18.8/include/linux/mutex.h
---- linux-3.18.8.orig/include/linux/mutex.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/mutex.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/mutex.h linux-3.18.9/include/linux/mutex.h
+--- linux-3.18.9.orig/include/linux/mutex.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/mutex.h 2015-03-15 16:03:03.808094874 -0500
@@ -19,6 +19,17 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
@@ -8126,9 +23102,9 @@ diff -Nur linux-3.18.8.orig/include/linux/mutex.h linux-3.18.8/include/linux/mut
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif /* __LINUX_MUTEX_H */
-diff -Nur linux-3.18.8.orig/include/linux/mutex_rt.h linux-3.18.8/include/linux/mutex_rt.h
---- linux-3.18.8.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/mutex_rt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/mutex_rt.h linux-3.18.9/include/linux/mutex_rt.h
+--- linux-3.18.9.orig/include/linux/mutex_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/mutex_rt.h 2015-03-15 16:03:03.808094874 -0500
@@ -0,0 +1,84 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
@@ -8214,9 +23190,9 @@ diff -Nur linux-3.18.8.orig/include/linux/mutex_rt.h linux-3.18.8/include/linux/
+} while (0)
+
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/netdevice.h linux-3.18.8/include/linux/netdevice.h
---- linux-3.18.8.orig/include/linux/netdevice.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/netdevice.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/netdevice.h linux-3.18.9/include/linux/netdevice.h
+--- linux-3.18.9.orig/include/linux/netdevice.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/netdevice.h 2015-03-15 16:03:03.808094874 -0500
@@ -2345,6 +2345,7 @@
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
@@ -8225,9 +23201,9 @@ diff -Nur linux-3.18.8.orig/include/linux/netdevice.h linux-3.18.8/include/linux
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
-diff -Nur linux-3.18.8.orig/include/linux/netfilter/x_tables.h linux-3.18.8/include/linux/netfilter/x_tables.h
---- linux-3.18.8.orig/include/linux/netfilter/x_tables.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/netfilter/x_tables.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/netfilter/x_tables.h linux-3.18.9/include/linux/netfilter/x_tables.h
+--- linux-3.18.9.orig/include/linux/netfilter/x_tables.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/netfilter/x_tables.h 2015-03-15 16:03:03.808094874 -0500
@@ -3,6 +3,7 @@
@@ -8263,9 +23239,9 @@ diff -Nur linux-3.18.8.orig/include/linux/netfilter/x_tables.h linux-3.18.8/incl
}
/*
-diff -Nur linux-3.18.8.orig/include/linux/notifier.h linux-3.18.8/include/linux/notifier.h
---- linux-3.18.8.orig/include/linux/notifier.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/notifier.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/notifier.h linux-3.18.9/include/linux/notifier.h
+--- linux-3.18.9.orig/include/linux/notifier.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/notifier.h 2015-03-15 16:03:03.808094874 -0500
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
@@ -8345,9 +23321,9 @@ diff -Nur linux-3.18.8.orig/include/linux/notifier.h linux-3.18.8/include/linux/
/* CPU notifiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
-diff -Nur linux-3.18.8.orig/include/linux/percpu.h linux-3.18.8/include/linux/percpu.h
---- linux-3.18.8.orig/include/linux/percpu.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/percpu.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/percpu.h linux-3.18.9/include/linux/percpu.h
+--- linux-3.18.9.orig/include/linux/percpu.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/percpu.h 2015-03-15 16:03:03.808094874 -0500
@@ -23,6 +23,35 @@
PERCPU_MODULE_RESERVE)
#endif
@@ -8384,9 +23360,9 @@ diff -Nur linux-3.18.8.orig/include/linux/percpu.h linux-3.18.8/include/linux/pe
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-diff -Nur linux-3.18.8.orig/include/linux/pid.h linux-3.18.8/include/linux/pid.h
---- linux-3.18.8.orig/include/linux/pid.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/pid.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/pid.h linux-3.18.9/include/linux/pid.h
+--- linux-3.18.9.orig/include/linux/pid.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/pid.h 2015-03-15 16:03:03.808094874 -0500
@@ -2,6 +2,7 @@
#define _LINUX_PID_H
@@ -8395,9 +23371,9 @@ diff -Nur linux-3.18.8.orig/include/linux/pid.h linux-3.18.8/include/linux/pid.h
enum pid_type
{
-diff -Nur linux-3.18.8.orig/include/linux/preempt.h linux-3.18.8/include/linux/preempt.h
---- linux-3.18.8.orig/include/linux/preempt.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/preempt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/preempt.h linux-3.18.9/include/linux/preempt.h
+--- linux-3.18.9.orig/include/linux/preempt.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/preempt.h 2015-03-15 16:03:03.808094874 -0500
@@ -33,6 +33,20 @@
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -8501,9 +23477,9 @@ diff -Nur linux-3.18.8.orig/include/linux/preempt.h linux-3.18.8/include/linux/p
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
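
The preempt.h hunk above adds the RT variants of the preempt-count manipulation. Closely related is the migrate_disable()/migrate_enable() pair this patch introduces (see the sched.h hunk below): unlike preempt_disable(), it only pins the current task to its CPU while leaving it preemptible, which is what RT code uses when it needs a stable CPU rather than atomicity. A sketch:

static void dummy_report(void)
{
        int cpu;

        migrate_disable();              /* stay on this CPU, but remain preemptible */
        cpu = smp_processor_id();       /* stable until migrate_enable() */
        pr_info("running on CPU %d\n", cpu);
        migrate_enable();
}
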
-diff -Nur linux-3.18.8.orig/include/linux/preempt_mask.h linux-3.18.8/include/linux/preempt_mask.h
---- linux-3.18.8.orig/include/linux/preempt_mask.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/preempt_mask.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/preempt_mask.h linux-3.18.9/include/linux/preempt_mask.h
+--- linux-3.18.9.orig/include/linux/preempt_mask.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/preempt_mask.h 2015-03-15 16:03:03.808094874 -0500
@@ -44,16 +44,26 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
@@ -8541,9 +23517,9 @@ diff -Nur linux-3.18.8.orig/include/linux/preempt_mask.h linux-3.18.8/include/li
/*
* Are we in NMI context?
-diff -Nur linux-3.18.8.orig/include/linux/printk.h linux-3.18.8/include/linux/printk.h
---- linux-3.18.8.orig/include/linux/printk.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/printk.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/printk.h linux-3.18.9/include/linux/printk.h
+--- linux-3.18.9.orig/include/linux/printk.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/printk.h 2015-03-15 16:03:03.808094874 -0500
@@ -119,9 +119,11 @@
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -8564,9 +23540,9 @@ diff -Nur linux-3.18.8.orig/include/linux/printk.h linux-3.18.8/include/linux/pr
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
-diff -Nur linux-3.18.8.orig/include/linux/radix-tree.h linux-3.18.8/include/linux/radix-tree.h
---- linux-3.18.8.orig/include/linux/radix-tree.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/radix-tree.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/radix-tree.h linux-3.18.9/include/linux/radix-tree.h
+--- linux-3.18.9.orig/include/linux/radix-tree.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/radix-tree.h 2015-03-15 16:03:03.808094874 -0500
@@ -277,8 +277,13 @@
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
@@ -8590,9 +23566,9 @@ diff -Nur linux-3.18.8.orig/include/linux/radix-tree.h linux-3.18.8/include/linu
}
/**
-diff -Nur linux-3.18.8.orig/include/linux/random.h linux-3.18.8/include/linux/random.h
---- linux-3.18.8.orig/include/linux/random.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/random.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/random.h linux-3.18.9/include/linux/random.h
+--- linux-3.18.9.orig/include/linux/random.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/random.h 2015-03-15 16:03:03.808094874 -0500
@@ -11,7 +11,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
@@ -8602,9 +23578,9 @@ diff -Nur linux-3.18.8.orig/include/linux/random.h linux-3.18.8/include/linux/ra
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
-diff -Nur linux-3.18.8.orig/include/linux/rcupdate.h linux-3.18.8/include/linux/rcupdate.h
---- linux-3.18.8.orig/include/linux/rcupdate.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/rcupdate.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rcupdate.h linux-3.18.9/include/linux/rcupdate.h
+--- linux-3.18.9.orig/include/linux/rcupdate.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/rcupdate.h 2015-03-15 16:03:03.812094874 -0500
@@ -147,6 +147,9 @@
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -8689,9 +23665,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rcupdate.h linux-3.18.8/include/linux/
local_bh_enable();
}
-diff -Nur linux-3.18.8.orig/include/linux/rcutree.h linux-3.18.8/include/linux/rcutree.h
---- linux-3.18.8.orig/include/linux/rcutree.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/rcutree.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rcutree.h linux-3.18.9/include/linux/rcutree.h
+--- linux-3.18.9.orig/include/linux/rcutree.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/rcutree.h 2015-03-15 16:03:03.812094874 -0500
@@ -46,7 +46,11 @@
rcu_note_context_switch(cpu);
}
@@ -8742,9 +23718,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rcutree.h linux-3.18.8/include/linux/r
+#endif
+
#endif /* __LINUX_RCUTREE_H */
-diff -Nur linux-3.18.8.orig/include/linux/rtmutex.h linux-3.18.8/include/linux/rtmutex.h
---- linux-3.18.8.orig/include/linux/rtmutex.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/rtmutex.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rtmutex.h linux-3.18.9/include/linux/rtmutex.h
+--- linux-3.18.9.orig/include/linux/rtmutex.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/rtmutex.h 2015-03-15 16:03:03.812094874 -0500
@@ -14,10 +14,14 @@
#include <linux/linkage.h>
@@ -8818,9 +23794,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rtmutex.h linux-3.18.8/include/linux/r
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
-diff -Nur linux-3.18.8.orig/include/linux/rwlock_rt.h linux-3.18.8/include/linux/rwlock_rt.h
---- linux-3.18.8.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/rwlock_rt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rwlock_rt.h linux-3.18.9/include/linux/rwlock_rt.h
+--- linux-3.18.9.orig/include/linux/rwlock_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/rwlock_rt.h 2015-03-15 16:03:03.812094874 -0500
@@ -0,0 +1,99 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
@@ -8921,9 +23897,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rwlock_rt.h linux-3.18.8/include/linux
+ } while (0)
+
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/rwlock_types.h linux-3.18.8/include/linux/rwlock_types.h
---- linux-3.18.8.orig/include/linux/rwlock_types.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/rwlock_types.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rwlock_types.h linux-3.18.9/include/linux/rwlock_types.h
+--- linux-3.18.9.orig/include/linux/rwlock_types.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/rwlock_types.h 2015-03-15 16:03:03.812094874 -0500
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
@@ -8944,9 +23920,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rwlock_types.h linux-3.18.8/include/li
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
#endif /* __LINUX_RWLOCK_TYPES_H */
-diff -Nur linux-3.18.8.orig/include/linux/rwlock_types_rt.h linux-3.18.8/include/linux/rwlock_types_rt.h
---- linux-3.18.8.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/rwlock_types_rt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rwlock_types_rt.h linux-3.18.9/include/linux/rwlock_types_rt.h
+--- linux-3.18.9.orig/include/linux/rwlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/rwlock_types_rt.h 2015-03-15 16:03:03.812094874 -0500
@@ -0,0 +1,33 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
@@ -8981,9 +23957,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rwlock_types_rt.h linux-3.18.8/include
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/rwsem.h linux-3.18.8/include/linux/rwsem.h
---- linux-3.18.8.orig/include/linux/rwsem.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/rwsem.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rwsem.h linux-3.18.9/include/linux/rwsem.h
+--- linux-3.18.9.orig/include/linux/rwsem.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/rwsem.h 2015-03-15 16:03:03.812094874 -0500
@@ -18,6 +18,10 @@
#include <linux/osq_lock.h>
#endif
@@ -9002,9 +23978,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rwsem.h linux-3.18.8/include/linux/rws
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* _LINUX_RWSEM_H */
-diff -Nur linux-3.18.8.orig/include/linux/rwsem_rt.h linux-3.18.8/include/linux/rwsem_rt.h
---- linux-3.18.8.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/rwsem_rt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/rwsem_rt.h linux-3.18.9/include/linux/rwsem_rt.h
+--- linux-3.18.9.orig/include/linux/rwsem_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/rwsem_rt.h 2015-03-15 16:03:03.812094874 -0500
@@ -0,0 +1,133 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
@@ -9139,9 +24115,9 @@ diff -Nur linux-3.18.8.orig/include/linux/rwsem_rt.h linux-3.18.8/include/linux/
+}
+#endif
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/sched.h linux-3.18.8/include/linux/sched.h
---- linux-3.18.8.orig/include/linux/sched.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/sched.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/sched.h linux-3.18.9/include/linux/sched.h
+--- linux-3.18.9.orig/include/linux/sched.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/sched.h 2015-03-15 16:03:03.812094874 -0500
@@ -26,6 +26,7 @@
#include <linux/nodemask.h>
#include <linux/mm_types.h>
@@ -9525,9 +24501,9 @@ diff -Nur linux-3.18.8.orig/include/linux/sched.h linux-3.18.8/include/linux/sch
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-diff -Nur linux-3.18.8.orig/include/linux/seqlock.h linux-3.18.8/include/linux/seqlock.h
---- linux-3.18.8.orig/include/linux/seqlock.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/seqlock.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/seqlock.h linux-3.18.9/include/linux/seqlock.h
+--- linux-3.18.9.orig/include/linux/seqlock.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/seqlock.h 2015-03-15 16:03:03.812094874 -0500
@@ -219,20 +219,30 @@
return __read_seqcount_retry(s, start);
}
@@ -9657,9 +24633,9 @@ diff -Nur linux-3.18.8.orig/include/linux/seqlock.h linux-3.18.8/include/linux/s
spin_unlock_irqrestore(&sl->lock, flags);
}
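
The seqlock.h changes above make the write side preemptible on RT: writers are serialized by the embedded spinlock, and a reader that observes an odd sequence count takes a turn on that lock so it can boost the preempted writer instead of busy-waiting forever. The reader pattern itself is unchanged, which is what keeps the conversion transparent; dummy_seqlock/dummy_value are illustrative:

static DEFINE_SEQLOCK(dummy_seqlock);
static u64 dummy_value;

static u64 dummy_read(void)
{
        u64 val;
        unsigned seq;

        do {
                seq = read_seqbegin(&dummy_seqlock);    /* may boost the writer on RT */
                val = dummy_value;
        } while (read_seqretry(&dummy_seqlock, seq));

        return val;
}
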
-diff -Nur linux-3.18.8.orig/include/linux/signal.h linux-3.18.8/include/linux/signal.h
---- linux-3.18.8.orig/include/linux/signal.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/signal.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/signal.h linux-3.18.9/include/linux/signal.h
+--- linux-3.18.9.orig/include/linux/signal.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/signal.h 2015-03-15 16:03:03.812094874 -0500
@@ -218,6 +218,7 @@
}
@@ -9668,9 +24644,9 @@ diff -Nur linux-3.18.8.orig/include/linux/signal.h linux-3.18.8/include/linux/si
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
-diff -Nur linux-3.18.8.orig/include/linux/skbuff.h linux-3.18.8/include/linux/skbuff.h
---- linux-3.18.8.orig/include/linux/skbuff.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/skbuff.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/skbuff.h linux-3.18.9/include/linux/skbuff.h
+--- linux-3.18.9.orig/include/linux/skbuff.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/skbuff.h 2015-03-15 16:03:03.816094874 -0500
@@ -172,6 +172,7 @@
__u32 qlen;
@@ -9692,9 +24668,9 @@ diff -Nur linux-3.18.8.orig/include/linux/skbuff.h linux-3.18.8/include/linux/sk
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
-diff -Nur linux-3.18.8.orig/include/linux/smp.h linux-3.18.8/include/linux/smp.h
---- linux-3.18.8.orig/include/linux/smp.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/smp.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/smp.h linux-3.18.9/include/linux/smp.h
+--- linux-3.18.9.orig/include/linux/smp.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/smp.h 2015-03-15 16:03:03.816094874 -0500
@@ -178,6 +178,9 @@
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
@@ -9705,9 +24681,9 @@ diff -Nur linux-3.18.8.orig/include/linux/smp.h linux-3.18.8/include/linux/smp.h
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
-diff -Nur linux-3.18.8.orig/include/linux/spinlock_api_smp.h linux-3.18.8/include/linux/spinlock_api_smp.h
---- linux-3.18.8.orig/include/linux/spinlock_api_smp.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock_api_smp.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock_api_smp.h linux-3.18.9/include/linux/spinlock_api_smp.h
+--- linux-3.18.9.orig/include/linux/spinlock_api_smp.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock_api_smp.h 2015-03-15 16:03:03.816094874 -0500
@@ -187,6 +187,8 @@
return 0;
}
@@ -9718,9 +24694,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock_api_smp.h linux-3.18.8/includ
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
-diff -Nur linux-3.18.8.orig/include/linux/spinlock.h linux-3.18.8/include/linux/spinlock.h
---- linux-3.18.8.orig/include/linux/spinlock.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock.h linux-3.18.9/include/linux/spinlock.h
+--- linux-3.18.9.orig/include/linux/spinlock.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock.h 2015-03-15 16:03:03.816094874 -0500
@@ -278,7 +278,11 @@
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
@@ -9752,9 +24728,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock.h linux-3.18.8/include/linux/
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
-diff -Nur linux-3.18.8.orig/include/linux/spinlock_rt.h linux-3.18.8/include/linux/spinlock_rt.h
---- linux-3.18.8.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock_rt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock_rt.h linux-3.18.9/include/linux/spinlock_rt.h
+--- linux-3.18.9.orig/include/linux/spinlock_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock_rt.h 2015-03-15 16:03:03.816094874 -0500
@@ -0,0 +1,167 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
@@ -9923,9 +24899,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock_rt.h linux-3.18.8/include/lin
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
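
spinlock_rt.h above is the heart of the RT model: under CONFIG_PREEMPT_RT_FULL a spinlock_t becomes an rt_mutex-based sleeping lock, while raw_spinlock_t (split out into spinlock_types_raw.h below) keeps the original busy-waiting behaviour for the few sections that must be truly atomic. Callers keep the usual API; what changes is which type they pick. A sketch with illustrative names:

static DEFINE_RAW_SPINLOCK(dummy_raw_lock);     /* still spins on RT: short, truly atomic paths */
static DEFINE_SPINLOCK(dummy_lock);             /* becomes a sleeping rt_mutex on RT */

static void dummy_locking(void)
{
        raw_spin_lock(&dummy_raw_lock);
        /* hard atomic section, same rules as before */
        raw_spin_unlock(&dummy_raw_lock);

        spin_lock(&dummy_lock);
        /* preemptible on RT; not usable from hard irq context there */
        spin_unlock(&dummy_lock);
}
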
-diff -Nur linux-3.18.8.orig/include/linux/spinlock_types.h linux-3.18.8/include/linux/spinlock_types.h
---- linux-3.18.8.orig/include/linux/spinlock_types.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock_types.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock_types.h linux-3.18.9/include/linux/spinlock_types.h
+--- linux-3.18.9.orig/include/linux/spinlock_types.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock_types.h 2015-03-15 16:03:03.816094874 -0500
@@ -9,80 +9,15 @@
* Released under the General Public License (GPL).
*/
@@ -10014,9 +24990,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock_types.h linux-3.18.8/include/
-#include <linux/rwlock_types.h>
-
#endif /* __LINUX_SPINLOCK_TYPES_H */
-diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_nort.h linux-3.18.8/include/linux/spinlock_types_nort.h
---- linux-3.18.8.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock_types_nort.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock_types_nort.h linux-3.18.9/include/linux/spinlock_types_nort.h
+--- linux-3.18.9.orig/include/linux/spinlock_types_nort.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock_types_nort.h 2015-03-15 16:03:03.816094874 -0500
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
@@ -10051,9 +25027,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_nort.h linux-3.18.8/inc
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_raw.h linux-3.18.8/include/linux/spinlock_types_raw.h
---- linux-3.18.8.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock_types_raw.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock_types_raw.h linux-3.18.9/include/linux/spinlock_types_raw.h
+--- linux-3.18.9.orig/include/linux/spinlock_types_raw.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock_types_raw.h 2015-03-15 16:03:03.816094874 -0500
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
@@ -10111,9 +25087,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_raw.h linux-3.18.8/incl
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_rt.h linux-3.18.8/include/linux/spinlock_types_rt.h
---- linux-3.18.8.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/spinlock_types_rt.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/spinlock_types_rt.h linux-3.18.9/include/linux/spinlock_types_rt.h
+--- linux-3.18.9.orig/include/linux/spinlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/spinlock_types_rt.h 2015-03-15 16:03:03.816094874 -0500
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
@@ -10166,9 +25142,9 @@ diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_rt.h linux-3.18.8/inclu
+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-3.18.8.orig/include/linux/srcu.h linux-3.18.8/include/linux/srcu.h
---- linux-3.18.8.orig/include/linux/srcu.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/srcu.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/srcu.h linux-3.18.9/include/linux/srcu.h
+--- linux-3.18.9.orig/include/linux/srcu.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/srcu.h 2015-03-15 16:03:03.820094874 -0500
@@ -84,10 +84,10 @@
void process_srcu(struct work_struct *work);
@@ -10197,9 +25173,9 @@ diff -Nur linux-3.18.8.orig/include/linux/srcu.h linux-3.18.8/include/linux/srcu
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
-diff -Nur linux-3.18.8.orig/include/linux/swap.h linux-3.18.8/include/linux/swap.h
---- linux-3.18.8.orig/include/linux/swap.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/swap.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/swap.h linux-3.18.9/include/linux/swap.h
+--- linux-3.18.9.orig/include/linux/swap.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/swap.h 2015-03-15 16:03:03.824094874 -0500
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
@@ -10218,9 +25194,9 @@ diff -Nur linux-3.18.8.orig/include/linux/swap.h linux-3.18.8/include/linux/swap
static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
-diff -Nur linux-3.18.8.orig/include/linux/sysctl.h linux-3.18.8/include/linux/sysctl.h
---- linux-3.18.8.orig/include/linux/sysctl.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/sysctl.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/sysctl.h linux-3.18.9/include/linux/sysctl.h
+--- linux-3.18.9.orig/include/linux/sysctl.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/sysctl.h 2015-03-15 16:03:03.824094874 -0500
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
@@ -10229,9 +25205,9 @@ diff -Nur linux-3.18.8.orig/include/linux/sysctl.h linux-3.18.8/include/linux/sy
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
-diff -Nur linux-3.18.8.orig/include/linux/thread_info.h linux-3.18.8/include/linux/thread_info.h
---- linux-3.18.8.orig/include/linux/thread_info.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/thread_info.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/thread_info.h linux-3.18.9/include/linux/thread_info.h
+--- linux-3.18.9.orig/include/linux/thread_info.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/thread_info.h 2015-03-15 16:03:03.824094874 -0500
@@ -102,7 +102,17 @@
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -10251,9 +25227,9 @@ diff -Nur linux-3.18.8.orig/include/linux/thread_info.h linux-3.18.8/include/lin
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
-diff -Nur linux-3.18.8.orig/include/linux/timer.h linux-3.18.8/include/linux/timer.h
---- linux-3.18.8.orig/include/linux/timer.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/timer.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/timer.h linux-3.18.9/include/linux/timer.h
+--- linux-3.18.9.orig/include/linux/timer.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/timer.h 2015-03-15 16:03:03.824094874 -0500
@@ -241,7 +241,7 @@
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -10263,9 +25239,9 @@ diff -Nur linux-3.18.8.orig/include/linux/timer.h linux-3.18.8/include/linux/tim
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
-diff -Nur linux-3.18.8.orig/include/linux/uaccess.h linux-3.18.8/include/linux/uaccess.h
---- linux-3.18.8.orig/include/linux/uaccess.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/uaccess.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/uaccess.h linux-3.18.9/include/linux/uaccess.h
+--- linux-3.18.9.orig/include/linux/uaccess.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/uaccess.h 2015-03-15 16:03:03.824094874 -0500
@@ -6,14 +6,9 @@
/*
@@ -10326,9 +25302,9 @@ diff -Nur linux-3.18.8.orig/include/linux/uaccess.h linux-3.18.8/include/linux/u
set_fs(old_fs); \
ret; \
})
-diff -Nur linux-3.18.8.orig/include/linux/uprobes.h linux-3.18.8/include/linux/uprobes.h
---- linux-3.18.8.orig/include/linux/uprobes.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/uprobes.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/uprobes.h linux-3.18.9/include/linux/uprobes.h
+--- linux-3.18.9.orig/include/linux/uprobes.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/uprobes.h 2015-03-15 16:03:03.824094874 -0500
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
@@ -10337,9 +25313,9 @@ diff -Nur linux-3.18.8.orig/include/linux/uprobes.h linux-3.18.8/include/linux/u
struct vm_area_struct;
struct mm_struct;
-diff -Nur linux-3.18.8.orig/include/linux/vmstat.h linux-3.18.8/include/linux/vmstat.h
---- linux-3.18.8.orig/include/linux/vmstat.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/vmstat.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/vmstat.h linux-3.18.9/include/linux/vmstat.h
+--- linux-3.18.9.orig/include/linux/vmstat.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/vmstat.h 2015-03-15 16:03:03.824094874 -0500
@@ -33,7 +33,9 @@
*/
static inline void __count_vm_event(enum vm_event_item item)
@@ -10360,9 +25336,9 @@ diff -Nur linux-3.18.8.orig/include/linux/vmstat.h linux-3.18.8/include/linux/vm
}
static inline void count_vm_events(enum vm_event_item item, long delta)
-diff -Nur linux-3.18.8.orig/include/linux/wait.h linux-3.18.8/include/linux/wait.h
---- linux-3.18.8.orig/include/linux/wait.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/linux/wait.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/wait.h linux-3.18.9/include/linux/wait.h
+--- linux-3.18.9.orig/include/linux/wait.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/linux/wait.h 2015-03-15 16:03:03.824094874 -0500
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <asm/current.h>
@@ -10371,9 +25347,9 @@ diff -Nur linux-3.18.8.orig/include/linux/wait.h linux-3.18.8/include/linux/wait
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-diff -Nur linux-3.18.8.orig/include/linux/wait-simple.h linux-3.18.8/include/linux/wait-simple.h
---- linux-3.18.8.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/wait-simple.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/wait-simple.h linux-3.18.9/include/linux/wait-simple.h
+--- linux-3.18.9.orig/include/linux/wait-simple.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/wait-simple.h 2015-03-15 16:03:03.824094874 -0500
@@ -0,0 +1,207 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
@@ -10582,9 +25558,9 @@ diff -Nur linux-3.18.8.orig/include/linux/wait-simple.h linux-3.18.8/include/lin
+})
+
+#endif
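
wait-simple.h adds a stripped-down waitqueue whose internal lock is a raw spinlock and whose wakeup path does bounded work, making it usable from contexts that must not take sleeping locks on RT. A usage sketch, assuming the DEFINE_SWAIT_HEAD()/swait_event_interruptible()/swait_wake() helpers from the elided body of the header; all dummy_* names are illustrative:

static DEFINE_SWAIT_HEAD(dummy_swait);
static bool dummy_ready;

static int dummy_wait_for_it(void)
{
        return swait_event_interruptible(dummy_swait, dummy_ready);
}

static void dummy_make_ready(void)      /* intended to be callable from hard irq context */
{
        dummy_ready = true;
        swait_wake(&dummy_swait);
}
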
-diff -Nur linux-3.18.8.orig/include/linux/work-simple.h linux-3.18.8/include/linux/work-simple.h
---- linux-3.18.8.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/linux/work-simple.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/linux/work-simple.h linux-3.18.9/include/linux/work-simple.h
+--- linux-3.18.9.orig/include/linux/work-simple.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/linux/work-simple.h 2015-03-15 16:03:03.824094874 -0500
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
@@ -10610,9 +25586,9 @@ diff -Nur linux-3.18.8.orig/include/linux/work-simple.h linux-3.18.8/include/lin
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
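
work-simple.h declares a minimal deferred-work facility ("swork") backed by a single kernel thread, paired with the swork_get()/swork_put() refcounting visible above. A sketch of the intended use, assuming the INIT_SWORK()/swork_queue() helpers from the elided part of the header; dummy_* names are illustrative:

static void dummy_swork_fn(struct swork_event *event)
{
        /* runs in the swork kernel thread, fully preemptible */
}

static struct swork_event dummy_event;

static int dummy_swork_init(void)
{
        int err = swork_get();          /* bring up the worker thread */
        if (err)
                return err;
        INIT_SWORK(&dummy_event, dummy_swork_fn);
        swork_queue(&dummy_event);
        return 0;
}
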
-diff -Nur linux-3.18.8.orig/include/net/dst.h linux-3.18.8/include/net/dst.h
---- linux-3.18.8.orig/include/net/dst.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/net/dst.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/net/dst.h linux-3.18.9/include/net/dst.h
+--- linux-3.18.9.orig/include/net/dst.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/net/dst.h 2015-03-15 16:03:03.824094874 -0500
@@ -403,7 +403,7 @@
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
@@ -10622,9 +25598,9 @@ diff -Nur linux-3.18.8.orig/include/net/dst.h linux-3.18.8/include/net/dst.h
if (dst->pending_confirm) {
unsigned long now = jiffies;
-diff -Nur linux-3.18.8.orig/include/net/neighbour.h linux-3.18.8/include/net/neighbour.h
---- linux-3.18.8.orig/include/net/neighbour.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/net/neighbour.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/net/neighbour.h linux-3.18.9/include/net/neighbour.h
+--- linux-3.18.9.orig/include/net/neighbour.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/net/neighbour.h 2015-03-15 16:03:03.824094874 -0500
@@ -387,7 +387,7 @@
}
#endif
@@ -10643,9 +25619,9 @@ diff -Nur linux-3.18.8.orig/include/net/neighbour.h linux-3.18.8/include/net/nei
const struct net_device *dev)
{
unsigned int seq;
-diff -Nur linux-3.18.8.orig/include/net/netns/ipv4.h linux-3.18.8/include/net/netns/ipv4.h
---- linux-3.18.8.orig/include/net/netns/ipv4.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/include/net/netns/ipv4.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/net/netns/ipv4.h linux-3.18.9/include/net/netns/ipv4.h
+--- linux-3.18.9.orig/include/net/netns/ipv4.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/include/net/netns/ipv4.h 2015-03-15 16:03:03.824094874 -0500
@@ -67,6 +67,7 @@
int sysctl_icmp_echo_ignore_all;
@@ -10654,9 +25630,9 @@ diff -Nur linux-3.18.8.orig/include/net/netns/ipv4.h linux-3.18.8/include/net/ne
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-diff -Nur linux-3.18.8.orig/include/trace/events/hist.h linux-3.18.8/include/trace/events/hist.h
---- linux-3.18.8.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/trace/events/hist.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/trace/events/hist.h linux-3.18.9/include/trace/events/hist.h
+--- linux-3.18.9.orig/include/trace/events/hist.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/trace/events/hist.h 2015-03-15 16:03:03.824094874 -0500
@@ -0,0 +1,72 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
@@ -10730,9 +25706,9 @@ diff -Nur linux-3.18.8.orig/include/trace/events/hist.h linux-3.18.8/include/tra
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
-diff -Nur linux-3.18.8.orig/include/trace/events/latency_hist.h linux-3.18.8/include/trace/events/latency_hist.h
---- linux-3.18.8.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/include/trace/events/latency_hist.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/include/trace/events/latency_hist.h linux-3.18.9/include/trace/events/latency_hist.h
+--- linux-3.18.9.orig/include/trace/events/latency_hist.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/include/trace/events/latency_hist.h 2015-03-15 16:03:03.824094874 -0500
@@ -0,0 +1,29 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
@@ -10763,9 +25739,9 @@ diff -Nur linux-3.18.8.orig/include/trace/events/latency_hist.h linux-3.18.8/inc
+}
+
+#endif /* _LATENCY_HIST_H */
-diff -Nur linux-3.18.8.orig/init/Kconfig linux-3.18.8/init/Kconfig
---- linux-3.18.8.orig/init/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/init/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/init/Kconfig linux-3.18.9/init/Kconfig
+--- linux-3.18.9.orig/init/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/init/Kconfig 2015-03-15 16:03:03.824094874 -0500
@@ -635,7 +635,7 @@
config RCU_FAST_NO_HZ
@@ -10808,9 +25784,9 @@ diff -Nur linux-3.18.8.orig/init/Kconfig linux-3.18.8/init/Kconfig
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
-diff -Nur linux-3.18.8.orig/init/main.c linux-3.18.8/init/main.c
---- linux-3.18.8.orig/init/main.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/init/main.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/init/main.c linux-3.18.9/init/main.c
+--- linux-3.18.9.orig/init/main.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/init/main.c 2015-03-15 16:03:03.828094874 -0500
@@ -533,6 +533,7 @@
setup_command_line(command_line);
setup_nr_cpu_ids();
@@ -10819,18 +25795,18 @@ diff -Nur linux-3.18.8.orig/init/main.c linux-3.18.8/init/main.c
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
build_all_zonelists(NULL, NULL);
-diff -Nur linux-3.18.8.orig/init/Makefile linux-3.18.8/init/Makefile
---- linux-3.18.8.orig/init/Makefile 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/init/Makefile 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/init/Makefile linux-3.18.9/init/Makefile
+--- linux-3.18.9.orig/init/Makefile 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/init/Makefile 2015-03-15 16:03:03.828094874 -0500
@@ -33,4 +33,4 @@
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-diff -Nur linux-3.18.8.orig/ipc/mqueue.c linux-3.18.8/ipc/mqueue.c
---- linux-3.18.8.orig/ipc/mqueue.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/ipc/mqueue.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/ipc/mqueue.c linux-3.18.9/ipc/mqueue.c
+--- linux-3.18.9.orig/ipc/mqueue.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/ipc/mqueue.c 2015-03-15 16:03:03.828094874 -0500
@@ -923,12 +923,17 @@
struct msg_msg *message,
struct ext_wait_queue *receiver)
@@ -10875,9 +25851,9 @@ diff -Nur linux-3.18.8.orig/ipc/mqueue.c linux-3.18.8/ipc/mqueue.c
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
-diff -Nur linux-3.18.8.orig/ipc/msg.c linux-3.18.8/ipc/msg.c
---- linux-3.18.8.orig/ipc/msg.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/ipc/msg.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/ipc/msg.c linux-3.18.9/ipc/msg.c
+--- linux-3.18.9.orig/ipc/msg.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/ipc/msg.c 2015-03-15 16:03:03.828094874 -0500
@@ -188,6 +188,12 @@
struct msg_receiver *msr, *t;
@@ -10927,9 +25903,9 @@ diff -Nur linux-3.18.8.orig/ipc/msg.c linux-3.18.8/ipc/msg.c
return 0;
}
-diff -Nur linux-3.18.8.orig/ipc/sem.c linux-3.18.8/ipc/sem.c
---- linux-3.18.8.orig/ipc/sem.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/ipc/sem.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/ipc/sem.c linux-3.18.9/ipc/sem.c
+--- linux-3.18.9.orig/ipc/sem.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/ipc/sem.c 2015-03-15 16:03:03.828094874 -0500
@@ -673,6 +673,13 @@
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
@@ -10968,9 +25944,9 @@ diff -Nur linux-3.18.8.orig/ipc/sem.c linux-3.18.8/ipc/sem.c
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-diff -Nur linux-3.18.8.orig/kernel/cgroup.c linux-3.18.8/kernel/cgroup.c
---- linux-3.18.8.orig/kernel/cgroup.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/cgroup.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/cgroup.c linux-3.18.9/kernel/cgroup.c
+--- linux-3.18.9.orig/kernel/cgroup.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/cgroup.c 2015-03-15 16:03:03.828094874 -0500
@@ -4355,10 +4355,10 @@
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -11003,9 +25979,9 @@ diff -Nur linux-3.18.8.orig/kernel/cgroup.c linux-3.18.8/kernel/cgroup.c
/*
* Used to destroy pidlists and separate to serve as flush domain.
-diff -Nur linux-3.18.8.orig/kernel/cpu.c linux-3.18.8/kernel/cpu.c
---- linux-3.18.8.orig/kernel/cpu.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/cpu.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/cpu.c linux-3.18.9/kernel/cpu.c
+--- linux-3.18.9.orig/kernel/cpu.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/cpu.c 2015-03-15 16:03:03.828094874 -0500
@@ -86,6 +86,290 @@
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -11385,9 +26361,9 @@ diff -Nur linux-3.18.8.orig/kernel/cpu.c linux-3.18.8/kernel/cpu.c
return err;
}
-diff -Nur linux-3.18.8.orig/kernel/debug/kdb/kdb_io.c linux-3.18.8/kernel/debug/kdb/kdb_io.c
---- linux-3.18.8.orig/kernel/debug/kdb/kdb_io.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/debug/kdb/kdb_io.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/debug/kdb/kdb_io.c linux-3.18.9/kernel/debug/kdb/kdb_io.c
+--- linux-3.18.9.orig/kernel/debug/kdb/kdb_io.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/debug/kdb/kdb_io.c 2015-03-15 16:03:03.828094874 -0500
@@ -554,7 +554,6 @@
int linecount;
int colcount;
@@ -11425,9 +26401,9 @@ diff -Nur linux-3.18.8.orig/kernel/debug/kdb/kdb_io.c linux-3.18.8/kernel/debug/
return r;
}
-diff -Nur linux-3.18.8.orig/kernel/events/core.c linux-3.18.8/kernel/events/core.c
---- linux-3.18.8.orig/kernel/events/core.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/events/core.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/events/core.c linux-3.18.9/kernel/events/core.c
+--- linux-3.18.9.orig/kernel/events/core.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/events/core.c 2015-03-15 16:03:03.832094874 -0500
@@ -6336,6 +6336,7 @@
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -11436,9 +26412,9 @@ diff -Nur linux-3.18.8.orig/kernel/events/core.c linux-3.18.8/kernel/events/core
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
-diff -Nur linux-3.18.8.orig/kernel/exit.c linux-3.18.8/kernel/exit.c
---- linux-3.18.8.orig/kernel/exit.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/exit.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/exit.c linux-3.18.9/kernel/exit.c
+--- linux-3.18.9.orig/kernel/exit.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/exit.c 2015-03-15 16:03:03.832094874 -0500
@@ -147,7 +147,7 @@
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
@@ -11448,9 +26424,9 @@ diff -Nur linux-3.18.8.orig/kernel/exit.c linux-3.18.8/kernel/exit.c
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
-diff -Nur linux-3.18.8.orig/kernel/fork.c linux-3.18.8/kernel/fork.c
---- linux-3.18.8.orig/kernel/fork.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/fork.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/fork.c linux-3.18.9/kernel/fork.c
+--- linux-3.18.9.orig/kernel/fork.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/fork.c 2015-03-15 16:03:03.832094874 -0500
@@ -97,7 +97,7 @@
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
@@ -11548,9 +26524,9 @@ diff -Nur linux-3.18.8.orig/kernel/fork.c linux-3.18.8/kernel/fork.c
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
-diff -Nur linux-3.18.8.orig/kernel/futex.c linux-3.18.8/kernel/futex.c
---- linux-3.18.8.orig/kernel/futex.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/futex.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/futex.c linux-3.18.9/kernel/futex.c
+--- linux-3.18.9.orig/kernel/futex.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/futex.c 2015-03-15 16:03:03.832094874 -0500
@@ -738,7 +738,9 @@
* task still owns the PI-state:
*/
@@ -11689,9 +26665,9 @@ diff -Nur linux-3.18.8.orig/kernel/futex.c linux-3.18.8/kernel/futex.c
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
-diff -Nur linux-3.18.8.orig/kernel/irq/handle.c linux-3.18.8/kernel/irq/handle.c
---- linux-3.18.8.orig/kernel/irq/handle.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/irq/handle.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/irq/handle.c linux-3.18.9/kernel/irq/handle.c
+--- linux-3.18.9.orig/kernel/irq/handle.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/irq/handle.c 2015-03-15 16:03:03.832094874 -0500
@@ -133,6 +133,8 @@
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
@@ -11714,9 +26690,9 @@ diff -Nur linux-3.18.8.orig/kernel/irq/handle.c linux-3.18.8/kernel/irq/handle.c
if (!noirqdebug)
note_interrupt(irq, desc, retval);
-diff -Nur linux-3.18.8.orig/kernel/irq/manage.c linux-3.18.8/kernel/irq/manage.c
---- linux-3.18.8.orig/kernel/irq/manage.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/irq/manage.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/irq/manage.c linux-3.18.9/kernel/irq/manage.c
+--- linux-3.18.9.orig/kernel/irq/manage.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/irq/manage.c 2015-03-15 16:03:03.832094874 -0500
@@ -22,6 +22,7 @@
#include "internals.h"
@@ -11889,9 +26865,9 @@ diff -Nur linux-3.18.8.orig/kernel/irq/manage.c linux-3.18.8/kernel/irq/manage.c
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
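
The irq/manage.c changes extend forced interrupt threading, which RT enables unconditionally: handlers run from per-interrupt kernel threads, so they become preemptible and may take the sleeping spinlocks introduced elsewhere in this patch. A driver whose primary handler genuinely must stay in hard interrupt context opts out with IRQF_NO_THREAD, e.g. (dummy names illustrative):

static irqreturn_t dummy_hardirq(int irq, void *dev_id)
{
        /* must remain in hard irq context even with forced threading */
        return IRQ_HANDLED;
}

static int dummy_probe(int irq, void *dev)
{
        return request_irq(irq, dummy_hardirq, IRQF_NO_THREAD,
                           "dummy-dev", dev);
}
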
-diff -Nur linux-3.18.8.orig/kernel/irq/settings.h linux-3.18.8/kernel/irq/settings.h
---- linux-3.18.8.orig/kernel/irq/settings.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/irq/settings.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/irq/settings.h linux-3.18.9/kernel/irq/settings.h
+--- linux-3.18.9.orig/kernel/irq/settings.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/irq/settings.h 2015-03-15 16:03:03.836094874 -0500
@@ -15,6 +15,7 @@
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
@@ -11925,9 +26901,9 @@ diff -Nur linux-3.18.8.orig/kernel/irq/settings.h linux-3.18.8/kernel/irq/settin
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
-diff -Nur linux-3.18.8.orig/kernel/irq/spurious.c linux-3.18.8/kernel/irq/spurious.c
---- linux-3.18.8.orig/kernel/irq/spurious.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/irq/spurious.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/irq/spurious.c linux-3.18.9/kernel/irq/spurious.c
+--- linux-3.18.9.orig/kernel/irq/spurious.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/irq/spurious.c 2015-03-15 16:03:03.836094874 -0500
@@ -444,6 +444,10 @@
static int __init irqfixup_setup(char *str)
@@ -11950,9 +26926,9 @@ diff -Nur linux-3.18.8.orig/kernel/irq/spurious.c linux-3.18.8/kernel/irq/spurio
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
-diff -Nur linux-3.18.8.orig/kernel/irq_work.c linux-3.18.8/kernel/irq_work.c
---- linux-3.18.8.orig/kernel/irq_work.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/irq_work.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/irq_work.c linux-3.18.9/kernel/irq_work.c
+--- linux-3.18.9.orig/kernel/irq_work.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/irq_work.c 2015-03-15 16:03:03.836094874 -0500
@@ -22,7 +22,9 @@
static DEFINE_PER_CPU(struct llist_head, raised_list);
@@ -12049,9 +27025,9 @@ diff -Nur linux-3.18.8.orig/kernel/irq_work.c linux-3.18.8/kernel/irq_work.c
irq_work_run_list(raised);
irq_work_run_list(&__get_cpu_var(lazy_list));
}
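
With the irq_work.c rework above, RT splits queued work between a raised list run from hard interrupt context and a lazy list run from the timer softirq; only work carrying the IRQ_WORK_HARD_IRQ flag (added by the irq_work.h hunk at the top of this section) keeps running in hard interrupt context on RT. A sketch of that opt-in, with dummy names:

static void dummy_irq_work_fn(struct irq_work *work)
{
        /* keep this hard-irq safe: no sleeping locks */
}

static struct irq_work dummy_irq_work;

static void dummy_irq_work_setup(void)
{
        init_irq_work(&dummy_irq_work, dummy_irq_work_fn);
        dummy_irq_work.flags = IRQ_WORK_HARD_IRQ;       /* run in hard irq even on RT */
        irq_work_queue(&dummy_irq_work);
}
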
-diff -Nur linux-3.18.8.orig/kernel/Kconfig.locks linux-3.18.8/kernel/Kconfig.locks
---- linux-3.18.8.orig/kernel/Kconfig.locks 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/Kconfig.locks 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/Kconfig.locks linux-3.18.9/kernel/Kconfig.locks
+--- linux-3.18.9.orig/kernel/Kconfig.locks 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/Kconfig.locks 2015-03-15 16:03:03.836094874 -0500
@@ -225,11 +225,11 @@
config MUTEX_SPIN_ON_OWNER
@@ -12066,9 +27042,9 @@ diff -Nur linux-3.18.8.orig/kernel/Kconfig.locks linux-3.18.8/kernel/Kconfig.loc
config ARCH_USE_QUEUE_RWLOCK
bool
-diff -Nur linux-3.18.8.orig/kernel/Kconfig.preempt linux-3.18.8/kernel/Kconfig.preempt
---- linux-3.18.8.orig/kernel/Kconfig.preempt 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/Kconfig.preempt 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/Kconfig.preempt linux-3.18.9/kernel/Kconfig.preempt
+--- linux-3.18.9.orig/kernel/Kconfig.preempt 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/Kconfig.preempt 2015-03-15 16:03:03.836094874 -0500
@@ -1,3 +1,16 @@
+config PREEMPT
+ bool
@@ -12121,9 +27097,9 @@ diff -Nur linux-3.18.8.orig/kernel/Kconfig.preempt linux-3.18.8/kernel/Kconfig.p
endchoice
config PREEMPT_COUNT
-diff -Nur linux-3.18.8.orig/kernel/ksysfs.c linux-3.18.8/kernel/ksysfs.c
---- linux-3.18.8.orig/kernel/ksysfs.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/ksysfs.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/ksysfs.c linux-3.18.9/kernel/ksysfs.c
+--- linux-3.18.9.orig/kernel/ksysfs.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/ksysfs.c 2015-03-15 16:03:03.836094874 -0500
@@ -136,6 +136,15 @@
#endif /* CONFIG_KEXEC */
@@ -12150,9 +27126,9 @@ diff -Nur linux-3.18.8.orig/kernel/ksysfs.c linux-3.18.8/kernel/ksysfs.c
NULL
};
-diff -Nur linux-3.18.8.orig/kernel/locking/lglock.c linux-3.18.8/kernel/locking/lglock.c
---- linux-3.18.8.orig/kernel/locking/lglock.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/lglock.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/lglock.c linux-3.18.9/kernel/locking/lglock.c
+--- linux-3.18.9.orig/kernel/locking/lglock.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/lglock.c 2015-03-15 16:03:03.836094874 -0500
@@ -4,6 +4,15 @@
#include <linux/cpu.h>
#include <linux/string.h>
@@ -12297,9 +27273,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/lglock.c linux-3.18.8/kernel/locking/
+ }
+}
+#endif
-diff -Nur linux-3.18.8.orig/kernel/locking/lockdep.c linux-3.18.8/kernel/locking/lockdep.c
---- linux-3.18.8.orig/kernel/locking/lockdep.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/lockdep.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/lockdep.c linux-3.18.9/kernel/locking/lockdep.c
+--- linux-3.18.9.orig/kernel/locking/lockdep.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/lockdep.c 2015-03-15 16:03:03.836094874 -0500
@@ -3542,6 +3542,7 @@
}
}
@@ -12316,9 +27292,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/lockdep.c linux-3.18.8/kernel/locking
if (!debug_locks)
print_irqtrace_events(current);
-diff -Nur linux-3.18.8.orig/kernel/locking/Makefile linux-3.18.8/kernel/locking/Makefile
---- linux-3.18.8.orig/kernel/locking/Makefile 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/Makefile 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/Makefile linux-3.18.9/kernel/locking/Makefile
+--- linux-3.18.9.orig/kernel/locking/Makefile 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/Makefile 2015-03-15 16:03:03.836094874 -0500
@@ -1,5 +1,5 @@
-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
@@ -12350,9 +27326,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/Makefile linux-3.18.8/kernel/locking/
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
-diff -Nur linux-3.18.8.orig/kernel/locking/percpu-rwsem.c linux-3.18.8/kernel/locking/percpu-rwsem.c
---- linux-3.18.8.orig/kernel/locking/percpu-rwsem.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/percpu-rwsem.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/percpu-rwsem.c linux-3.18.9/kernel/locking/percpu-rwsem.c
+--- linux-3.18.9.orig/kernel/locking/percpu-rwsem.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/percpu-rwsem.c 2015-03-15 16:03:03.836094874 -0500
@@ -84,8 +84,12 @@
down_read(&brw->rw_sem);
@@ -12366,9 +27342,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/percpu-rwsem.c linux-3.18.8/kernel/lo
}
void percpu_up_read(struct percpu_rw_semaphore *brw)
-diff -Nur linux-3.18.8.orig/kernel/locking/rt.c linux-3.18.8/kernel/locking/rt.c
---- linux-3.18.8.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/kernel/locking/rt.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/rt.c linux-3.18.9/kernel/locking/rt.c
+--- linux-3.18.9.orig/kernel/locking/rt.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/kernel/locking/rt.c 2015-03-15 16:03:03.836094874 -0500
@@ -0,0 +1,437 @@
+/*
+ * kernel/rt.c
@@ -12807,9 +27783,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/rt.c linux-3.18.8/kernel/locking/rt.c
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
-diff -Nur linux-3.18.8.orig/kernel/locking/rtmutex.c linux-3.18.8/kernel/locking/rtmutex.c
---- linux-3.18.8.orig/kernel/locking/rtmutex.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/rtmutex.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/rtmutex.c linux-3.18.9/kernel/locking/rtmutex.c
+--- linux-3.18.9.orig/kernel/locking/rtmutex.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/rtmutex.c 2015-03-15 16:03:03.836094874 -0500
@@ -7,6 +7,11 @@
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
@@ -13833,9 +28809,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/rtmutex.c linux-3.18.8/kernel/locking
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+#endif
-diff -Nur linux-3.18.8.orig/kernel/locking/rtmutex_common.h linux-3.18.8/kernel/locking/rtmutex_common.h
---- linux-3.18.8.orig/kernel/locking/rtmutex_common.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/rtmutex_common.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/rtmutex_common.h linux-3.18.9/kernel/locking/rtmutex_common.h
+--- linux-3.18.9.orig/kernel/locking/rtmutex_common.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/rtmutex_common.h 2015-03-15 16:03:03.836094874 -0500
@@ -49,6 +49,7 @@
struct rb_node pi_tree_entry;
struct task_struct *task;
@@ -13869,9 +28845,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/rtmutex_common.h linux-3.18.8/kernel/
+}
+
#endif
-diff -Nur linux-3.18.8.orig/kernel/locking/spinlock.c linux-3.18.8/kernel/locking/spinlock.c
---- linux-3.18.8.orig/kernel/locking/spinlock.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/spinlock.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/spinlock.c linux-3.18.9/kernel/locking/spinlock.c
+--- linux-3.18.9.orig/kernel/locking/spinlock.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/spinlock.c 2015-03-15 16:03:03.840094875 -0500
@@ -124,8 +124,11 @@
* __[spin|read|write]_lock_bh()
*/
@@ -13902,9 +28878,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/spinlock.c linux-3.18.8/kernel/lockin
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
-diff -Nur linux-3.18.8.orig/kernel/locking/spinlock_debug.c linux-3.18.8/kernel/locking/spinlock_debug.c
---- linux-3.18.8.orig/kernel/locking/spinlock_debug.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/locking/spinlock_debug.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/locking/spinlock_debug.c linux-3.18.9/kernel/locking/spinlock_debug.c
+--- linux-3.18.9.orig/kernel/locking/spinlock_debug.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/locking/spinlock_debug.c 2015-03-15 16:03:03.840094875 -0500
@@ -31,6 +31,7 @@
EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -13935,9 +28911,9 @@ diff -Nur linux-3.18.8.orig/kernel/locking/spinlock_debug.c linux-3.18.8/kernel/
}
+
+#endif
-diff -Nur linux-3.18.8.orig/kernel/panic.c linux-3.18.8/kernel/panic.c
---- linux-3.18.8.orig/kernel/panic.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/panic.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/panic.c linux-3.18.9/kernel/panic.c
+--- linux-3.18.9.orig/kernel/panic.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/panic.c 2015-03-15 16:03:03.840094875 -0500
@@ -384,9 +384,11 @@
static int init_oops_id(void)
@@ -13950,9 +28926,9 @@ diff -Nur linux-3.18.8.orig/kernel/panic.c linux-3.18.8/kernel/panic.c
oops_id++;
return 0;
-diff -Nur linux-3.18.8.orig/kernel/power/hibernate.c linux-3.18.8/kernel/power/hibernate.c
---- linux-3.18.8.orig/kernel/power/hibernate.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/power/hibernate.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/power/hibernate.c linux-3.18.9/kernel/power/hibernate.c
+--- linux-3.18.9.orig/kernel/power/hibernate.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/power/hibernate.c 2015-03-15 16:03:03.840094875 -0500
@@ -287,6 +287,8 @@
local_irq_disable();
@@ -14002,9 +28978,9 @@ diff -Nur linux-3.18.8.orig/kernel/power/hibernate.c linux-3.18.8/kernel/power/h
local_irq_enable();
enable_nonboot_cpus();
-diff -Nur linux-3.18.8.orig/kernel/power/suspend.c linux-3.18.8/kernel/power/suspend.c
---- linux-3.18.8.orig/kernel/power/suspend.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/power/suspend.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/power/suspend.c linux-3.18.9/kernel/power/suspend.c
+--- linux-3.18.9.orig/kernel/power/suspend.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/power/suspend.c 2015-03-15 16:03:03.840094875 -0500
@@ -318,6 +318,8 @@
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -14023,9 +28999,9 @@ diff -Nur linux-3.18.8.orig/kernel/power/suspend.c linux-3.18.8/kernel/power/sus
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
-diff -Nur linux-3.18.8.orig/kernel/printk/printk.c linux-3.18.8/kernel/printk/printk.c
---- linux-3.18.8.orig/kernel/printk/printk.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/printk/printk.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/printk/printk.c linux-3.18.9/kernel/printk/printk.c
+--- linux-3.18.9.orig/kernel/printk/printk.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/printk/printk.c 2015-03-15 16:03:03.840094875 -0500
@@ -1165,6 +1165,7 @@
{
char *text;
@@ -14281,9 +29257,9 @@ diff -Nur linux-3.18.8.orig/kernel/printk/printk.c linux-3.18.8/kernel/printk/pr
}
console_locked = 0;
-diff -Nur linux-3.18.8.orig/kernel/ptrace.c linux-3.18.8/kernel/ptrace.c
---- linux-3.18.8.orig/kernel/ptrace.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/ptrace.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/ptrace.c linux-3.18.9/kernel/ptrace.c
+--- linux-3.18.9.orig/kernel/ptrace.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/ptrace.c 2015-03-15 16:03:03.840094875 -0500
@@ -129,7 +129,12 @@
spin_lock_irq(&task->sighand->siglock);
@@ -14298,9 +29274,9 @@ diff -Nur linux-3.18.8.orig/kernel/ptrace.c linux-3.18.8/kernel/ptrace.c
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
-diff -Nur linux-3.18.8.orig/kernel/rcu/tiny.c linux-3.18.8/kernel/rcu/tiny.c
---- linux-3.18.8.orig/kernel/rcu/tiny.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/rcu/tiny.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/rcu/tiny.c linux-3.18.9/kernel/rcu/tiny.c
+--- linux-3.18.9.orig/kernel/rcu/tiny.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/rcu/tiny.c 2015-03-15 16:03:03.840094875 -0500
@@ -370,6 +370,7 @@
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -14317,9 +29293,9 @@ diff -Nur linux-3.18.8.orig/kernel/rcu/tiny.c linux-3.18.8/kernel/rcu/tiny.c
void rcu_init(void)
{
-diff -Nur linux-3.18.8.orig/kernel/rcu/tree.c linux-3.18.8/kernel/rcu/tree.c
---- linux-3.18.8.orig/kernel/rcu/tree.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/rcu/tree.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/rcu/tree.c linux-3.18.9/kernel/rcu/tree.c
+--- linux-3.18.9.orig/kernel/rcu/tree.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/rcu/tree.c 2015-03-15 16:03:03.840094875 -0500
@@ -56,6 +56,11 @@
#include <linux/random.h>
#include <linux/ftrace_event.h>
@@ -14616,9 +29592,9 @@ diff -Nur linux-3.18.8.orig/kernel/rcu/tree.c linux-3.18.8/kernel/rcu/tree.c
/*
* We don't need protection against CPU-hotplug here because
-diff -Nur linux-3.18.8.orig/kernel/rcu/tree.h linux-3.18.8/kernel/rcu/tree.h
---- linux-3.18.8.orig/kernel/rcu/tree.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/rcu/tree.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/rcu/tree.h linux-3.18.9/kernel/rcu/tree.h
+--- linux-3.18.9.orig/kernel/rcu/tree.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/rcu/tree.h 2015-03-15 16:03:03.840094875 -0500
@@ -28,6 +28,7 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
@@ -14666,9 +29642,9 @@ diff -Nur linux-3.18.8.orig/kernel/rcu/tree.h linux-3.18.8/kernel/rcu/tree.h
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-diff -Nur linux-3.18.8.orig/kernel/rcu/tree_plugin.h linux-3.18.8/kernel/rcu/tree_plugin.h
---- linux-3.18.8.orig/kernel/rcu/tree_plugin.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/rcu/tree_plugin.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/rcu/tree_plugin.h linux-3.18.9/kernel/rcu/tree_plugin.h
+--- linux-3.18.9.orig/kernel/rcu/tree_plugin.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/rcu/tree_plugin.h 2015-03-15 16:03:03.844094875 -0500
@@ -24,12 +24,6 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
@@ -14986,9 +29962,9 @@ diff -Nur linux-3.18.8.orig/kernel/rcu/tree_plugin.h linux-3.18.8/kernel/rcu/tre
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}
-diff -Nur linux-3.18.8.orig/kernel/rcu/update.c linux-3.18.8/kernel/rcu/update.c
---- linux-3.18.8.orig/kernel/rcu/update.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/rcu/update.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/rcu/update.c linux-3.18.9/kernel/rcu/update.c
+--- linux-3.18.9.orig/kernel/rcu/update.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/rcu/update.c 2015-03-15 16:03:03.844094875 -0500
@@ -170,6 +170,7 @@
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -15005,9 +29981,9 @@ diff -Nur linux-3.18.8.orig/kernel/rcu/update.c linux-3.18.8/kernel/rcu/update.c
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-diff -Nur linux-3.18.8.orig/kernel/relay.c linux-3.18.8/kernel/relay.c
---- linux-3.18.8.orig/kernel/relay.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/relay.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/relay.c linux-3.18.9/kernel/relay.c
+--- linux-3.18.9.orig/kernel/relay.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/relay.c 2015-03-15 16:03:03.844094875 -0500
@@ -339,6 +339,10 @@
{
struct rchan_buf *buf = (struct rchan_buf *)data;
@@ -15043,9 +30019,9 @@ diff -Nur linux-3.18.8.orig/kernel/relay.c linux-3.18.8/kernel/relay.c
}
old = buf->data;
-diff -Nur linux-3.18.8.orig/kernel/res_counter.c linux-3.18.8/kernel/res_counter.c
---- linux-3.18.8.orig/kernel/res_counter.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/res_counter.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/res_counter.c linux-3.18.9/kernel/res_counter.c
+--- linux-3.18.9.orig/kernel/res_counter.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/res_counter.c 2015-03-15 16:03:03.844094875 -0500
@@ -59,7 +59,7 @@
r = ret = 0;
@@ -15082,9 +30058,9 @@ diff -Nur linux-3.18.8.orig/kernel/res_counter.c linux-3.18.8/kernel/res_counter
return ret;
}
-diff -Nur linux-3.18.8.orig/kernel/sched/completion.c linux-3.18.8/kernel/sched/completion.c
---- linux-3.18.8.orig/kernel/sched/completion.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/completion.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/completion.c linux-3.18.9/kernel/sched/completion.c
+--- linux-3.18.9.orig/kernel/sched/completion.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/completion.c 2015-03-15 16:03:03.844094875 -0500
@@ -30,10 +30,10 @@
{
unsigned long flags;
@@ -15179,9 +30155,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/completion.c linux-3.18.8/kernel/sched/
return ret;
}
EXPORT_SYMBOL(completion_done);
-diff -Nur linux-3.18.8.orig/kernel/sched/core.c linux-3.18.8/kernel/sched/core.c
---- linux-3.18.8.orig/kernel/sched/core.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/core.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/core.c linux-3.18.9/kernel/sched/core.c
+--- linux-3.18.9.orig/kernel/sched/core.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/core.c 2015-03-15 16:03:03.844094875 -0500
@@ -280,7 +280,11 @@
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
@@ -15871,9 +30847,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/core.c linux-3.18.8/kernel/sched/core.c
return (nested == preempt_offset);
}
-diff -Nur linux-3.18.8.orig/kernel/sched/cputime.c linux-3.18.8/kernel/sched/cputime.c
---- linux-3.18.8.orig/kernel/sched/cputime.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/cputime.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/cputime.c linux-3.18.9/kernel/sched/cputime.c
+--- linux-3.18.9.orig/kernel/sched/cputime.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/cputime.c 2015-03-15 16:03:03.848094875 -0500
@@ -675,37 +675,45 @@
void vtime_account_system(struct task_struct *tsk)
@@ -16027,9 +31003,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/cputime.c linux-3.18.8/kernel/sched/cpu
}
-diff -Nur linux-3.18.8.orig/kernel/sched/deadline.c linux-3.18.8/kernel/sched/deadline.c
---- linux-3.18.8.orig/kernel/sched/deadline.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/deadline.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/deadline.c linux-3.18.9/kernel/sched/deadline.c
+--- linux-3.18.9.orig/kernel/sched/deadline.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/deadline.c 2015-03-15 16:03:03.848094875 -0500
@@ -570,6 +570,7 @@
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -16038,9 +31014,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/deadline.c linux-3.18.8/kernel/sched/de
}
static
-diff -Nur linux-3.18.8.orig/kernel/sched/debug.c linux-3.18.8/kernel/sched/debug.c
---- linux-3.18.8.orig/kernel/sched/debug.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/debug.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/debug.c linux-3.18.9/kernel/sched/debug.c
+--- linux-3.18.9.orig/kernel/sched/debug.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/debug.c 2015-03-15 16:03:03.848094875 -0500
@@ -256,6 +256,9 @@
P(rt_throttled);
PN(rt_time);
@@ -16062,9 +31038,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/debug.c linux-3.18.8/kernel/sched/debug
#undef PN
#undef __PN
#undef P
-diff -Nur linux-3.18.8.orig/kernel/sched/fair.c linux-3.18.8/kernel/sched/fair.c
---- linux-3.18.8.orig/kernel/sched/fair.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/fair.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/fair.c linux-3.18.9/kernel/sched/fair.c
+--- linux-3.18.9.orig/kernel/sched/fair.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/fair.c 2015-03-15 16:03:03.848094875 -0500
@@ -2951,7 +2951,7 @@
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
@@ -16137,9 +31113,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/fair.c linux-3.18.8/kernel/sched/fair.c
} else
check_preempt_curr(rq, p, 0);
}
-diff -Nur linux-3.18.8.orig/kernel/sched/features.h linux-3.18.8/kernel/sched/features.h
---- linux-3.18.8.orig/kernel/sched/features.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/features.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/features.h linux-3.18.9/kernel/sched/features.h
+--- linux-3.18.9.orig/kernel/sched/features.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/features.h 2015-03-15 16:03:03.848094875 -0500
@@ -50,12 +50,18 @@
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
@@ -16160,9 +31136,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/features.h linux-3.18.8/kernel/sched/fe
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
-diff -Nur linux-3.18.8.orig/kernel/sched/Makefile linux-3.18.8/kernel/sched/Makefile
---- linux-3.18.8.orig/kernel/sched/Makefile 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/Makefile 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/Makefile linux-3.18.9/kernel/sched/Makefile
+--- linux-3.18.9.orig/kernel/sched/Makefile 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/Makefile 2015-03-15 16:03:03.848094875 -0500
@@ -13,7 +13,7 @@
obj-y += core.o proc.o clock.o cputime.o
@@ -16172,9 +31148,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/Makefile linux-3.18.8/kernel/sched/Make
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
-diff -Nur linux-3.18.8.orig/kernel/sched/rt.c linux-3.18.8/kernel/sched/rt.c
---- linux-3.18.8.orig/kernel/sched/rt.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/rt.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/rt.c linux-3.18.9/kernel/sched/rt.c
+--- linux-3.18.9.orig/kernel/sched/rt.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/rt.c 2015-03-15 16:03:03.848094875 -0500
@@ -43,6 +43,7 @@
hrtimer_init(&rt_b->rt_period_timer,
@@ -16183,9 +31159,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/rt.c linux-3.18.8/kernel/sched/rt.c
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
-diff -Nur linux-3.18.8.orig/kernel/sched/sched.h linux-3.18.8/kernel/sched/sched.h
---- linux-3.18.8.orig/kernel/sched/sched.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/sched/sched.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/sched.h linux-3.18.9/kernel/sched/sched.h
+--- linux-3.18.9.orig/kernel/sched/sched.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/sched/sched.h 2015-03-15 16:03:03.848094875 -0500
@@ -1018,6 +1018,7 @@
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
@@ -16210,9 +31186,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/sched.h linux-3.18.8/kernel/sched/sched
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-diff -Nur linux-3.18.8.orig/kernel/sched/wait-simple.c linux-3.18.8/kernel/sched/wait-simple.c
---- linux-3.18.8.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/kernel/sched/wait-simple.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/wait-simple.c linux-3.18.9/kernel/sched/wait-simple.c
+--- linux-3.18.9.orig/kernel/sched/wait-simple.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/kernel/sched/wait-simple.c 2015-03-15 16:03:03.848094875 -0500
@@ -0,0 +1,115 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
@@ -16329,9 +31305,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/wait-simple.c linux-3.18.8/kernel/sched
+ return woken;
+}
+EXPORT_SYMBOL(__swait_wake);
-diff -Nur linux-3.18.8.orig/kernel/sched/work-simple.c linux-3.18.8/kernel/sched/work-simple.c
---- linux-3.18.8.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/kernel/sched/work-simple.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/sched/work-simple.c linux-3.18.9/kernel/sched/work-simple.c
+--- linux-3.18.9.orig/kernel/sched/work-simple.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/kernel/sched/work-simple.c 2015-03-15 16:03:03.852094875 -0500
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
@@ -16509,9 +31485,9 @@ diff -Nur linux-3.18.8.orig/kernel/sched/work-simple.c linux-3.18.8/kernel/sched
+ mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
-diff -Nur linux-3.18.8.orig/kernel/signal.c linux-3.18.8/kernel/signal.c
---- linux-3.18.8.orig/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/signal.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/signal.c linux-3.18.9/kernel/signal.c
+--- linux-3.18.9.orig/kernel/signal.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/signal.c 2015-03-15 16:03:03.852094875 -0500
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/init.h>
@@ -16756,9 +31732,9 @@ diff -Nur linux-3.18.8.orig/kernel/signal.c linux-3.18.8/kernel/signal.c
freezable_schedule();
} else {
/*
-diff -Nur linux-3.18.8.orig/kernel/softirq.c linux-3.18.8/kernel/softirq.c
---- linux-3.18.8.orig/kernel/softirq.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/softirq.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/softirq.c linux-3.18.9/kernel/softirq.c
+--- linux-3.18.9.orig/kernel/softirq.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/softirq.c 2015-03-15 16:03:03.852094875 -0500
@@ -21,10 +21,12 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -17650,9 +32626,9 @@ diff -Nur linux-3.18.8.orig/kernel/softirq.c linux-3.18.8/kernel/softirq.c
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
-diff -Nur linux-3.18.8.orig/kernel/stop_machine.c linux-3.18.8/kernel/stop_machine.c
---- linux-3.18.8.orig/kernel/stop_machine.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/stop_machine.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/stop_machine.c linux-3.18.9/kernel/stop_machine.c
+--- linux-3.18.9.orig/kernel/stop_machine.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/stop_machine.c 2015-03-15 16:03:03.852094875 -0500
@@ -30,12 +30,12 @@
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
@@ -17913,9 +32889,9 @@ diff -Nur linux-3.18.8.orig/kernel/stop_machine.c linux-3.18.8/kernel/stop_machi
cpu_relax();
mutex_unlock(&stop_cpus_mutex);
-diff -Nur linux-3.18.8.orig/kernel/time/hrtimer.c linux-3.18.8/kernel/time/hrtimer.c
---- linux-3.18.8.orig/kernel/time/hrtimer.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/hrtimer.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/hrtimer.c linux-3.18.9/kernel/time/hrtimer.c
+--- linux-3.18.9.orig/kernel/time/hrtimer.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/hrtimer.c 2015-03-15 16:03:03.852094875 -0500
@@ -48,11 +48,13 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
@@ -18520,9 +33496,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/hrtimer.c linux-3.18.8/kernel/time/hrtim
}
/**
-diff -Nur linux-3.18.8.orig/kernel/time/itimer.c linux-3.18.8/kernel/time/itimer.c
---- linux-3.18.8.orig/kernel/time/itimer.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/itimer.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/itimer.c linux-3.18.9/kernel/time/itimer.c
+--- linux-3.18.9.orig/kernel/time/itimer.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/itimer.c 2015-03-15 16:03:03.852094875 -0500
@@ -213,6 +213,7 @@
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
@@ -18531,9 +33507,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/itimer.c linux-3.18.8/kernel/time/itimer
goto again;
}
expires = timeval_to_ktime(value->it_value);
-diff -Nur linux-3.18.8.orig/kernel/time/jiffies.c linux-3.18.8/kernel/time/jiffies.c
---- linux-3.18.8.orig/kernel/time/jiffies.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/jiffies.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/jiffies.c linux-3.18.9/kernel/time/jiffies.c
+--- linux-3.18.9.orig/kernel/time/jiffies.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/jiffies.c 2015-03-15 16:03:03.852094875 -0500
@@ -73,7 +73,8 @@
.shift = JIFFIES_SHIFT,
};
@@ -18556,9 +33532,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/jiffies.c linux-3.18.8/kernel/time/jiffi
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
-diff -Nur linux-3.18.8.orig/kernel/time/ntp.c linux-3.18.8/kernel/time/ntp.c
---- linux-3.18.8.orig/kernel/time/ntp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/ntp.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/ntp.c linux-3.18.9/kernel/time/ntp.c
+--- linux-3.18.9.orig/kernel/time/ntp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/ntp.c 2015-03-15 16:03:03.852094875 -0500
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
@@ -18620,9 +33596,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/ntp.c linux-3.18.8/kernel/time/ntp.c
#else
void ntp_notify_cmos_timer(void) { }
-diff -Nur linux-3.18.8.orig/kernel/time/posix-cpu-timers.c linux-3.18.8/kernel/time/posix-cpu-timers.c
---- linux-3.18.8.orig/kernel/time/posix-cpu-timers.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/posix-cpu-timers.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/posix-cpu-timers.c linux-3.18.9/kernel/time/posix-cpu-timers.c
+--- linux-3.18.9.orig/kernel/time/posix-cpu-timers.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/posix-cpu-timers.c 2015-03-15 16:03:03.856094875 -0500
@@ -3,6 +3,7 @@
*/
@@ -18870,9 +33846,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/posix-cpu-timers.c linux-3.18.8/kernel/t
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
-diff -Nur linux-3.18.8.orig/kernel/time/posix-timers.c linux-3.18.8/kernel/time/posix-timers.c
---- linux-3.18.8.orig/kernel/time/posix-timers.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/posix-timers.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/posix-timers.c linux-3.18.9/kernel/time/posix-timers.c
+--- linux-3.18.9.orig/kernel/time/posix-timers.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/posix-timers.c 2015-03-15 16:03:03.856094875 -0500
@@ -499,6 +499,7 @@
static struct pid *good_sigevent(sigevent_t * event)
{
@@ -18968,9 +33944,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/posix-timers.c linux-3.18.8/kernel/time/
goto retry_delete;
}
list_del(&timer->list);
-diff -Nur linux-3.18.8.orig/kernel/time/tick-common.c linux-3.18.8/kernel/time/tick-common.c
---- linux-3.18.8.orig/kernel/time/tick-common.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/tick-common.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/tick-common.c linux-3.18.9/kernel/time/tick-common.c
+--- linux-3.18.9.orig/kernel/time/tick-common.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/tick-common.c 2015-03-15 16:03:03.856094875 -0500
@@ -78,13 +78,15 @@
static void tick_periodic(int cpu)
{
@@ -19001,9 +33977,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/tick-common.c linux-3.18.8/kernel/time/t
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-diff -Nur linux-3.18.8.orig/kernel/time/tick-internal.h linux-3.18.8/kernel/time/tick-internal.h
---- linux-3.18.8.orig/kernel/time/tick-internal.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/tick-internal.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/tick-internal.h linux-3.18.9/kernel/time/tick-internal.h
+--- linux-3.18.9.orig/kernel/time/tick-internal.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/tick-internal.h 2015-03-15 16:03:03.856094875 -0500
@@ -6,7 +6,8 @@
#include "timekeeping.h"
@@ -19014,9 +33990,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/tick-internal.h linux-3.18.8/kernel/time
#define CS_NAME_LEN 32
-diff -Nur linux-3.18.8.orig/kernel/time/tick-sched.c linux-3.18.8/kernel/time/tick-sched.c
---- linux-3.18.8.orig/kernel/time/tick-sched.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/tick-sched.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/tick-sched.c linux-3.18.9/kernel/time/tick-sched.c
+--- linux-3.18.9.orig/kernel/time/tick-sched.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/tick-sched.c 2015-03-15 16:03:03.856094875 -0500
@@ -62,7 +62,8 @@
return;
@@ -19104,9 +34080,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/tick-sched.c linux-3.18.8/kernel/time/ti
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
-diff -Nur linux-3.18.8.orig/kernel/time/timekeeping.c linux-3.18.8/kernel/time/timekeeping.c
---- linux-3.18.8.orig/kernel/time/timekeeping.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/timekeeping.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/timekeeping.c linux-3.18.9/kernel/time/timekeeping.c
+--- linux-3.18.9.orig/kernel/time/timekeeping.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/timekeeping.c 2015-03-15 16:03:03.856094875 -0500
@@ -1814,8 +1814,10 @@
*/
void xtime_update(unsigned long ticks)
@@ -19120,9 +34096,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/timekeeping.c linux-3.18.8/kernel/time/t
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
-diff -Nur linux-3.18.8.orig/kernel/time/timer.c linux-3.18.8/kernel/time/timer.c
---- linux-3.18.8.orig/kernel/time/timer.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/time/timer.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/time/timer.c linux-3.18.9/kernel/time/timer.c
+--- linux-3.18.9.orig/kernel/time/timer.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/time/timer.c 2015-03-15 16:03:03.856094875 -0500
@@ -78,6 +78,9 @@
struct tvec_base {
spinlock_t lock;
@@ -19341,9 +34317,9 @@ diff -Nur linux-3.18.8.orig/kernel/time/timer.c linux-3.18.8/kernel/time/timer.c
}
#endif /* CONFIG_HOTPLUG_CPU */
-diff -Nur linux-3.18.8.orig/kernel/trace/Kconfig linux-3.18.8/kernel/trace/Kconfig
---- linux-3.18.8.orig/kernel/trace/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/Kconfig linux-3.18.9/kernel/trace/Kconfig
+--- linux-3.18.9.orig/kernel/trace/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/Kconfig 2015-03-15 16:03:03.856094875 -0500
@@ -187,6 +187,24 @@
enabled. This option and the preempt-off timing option can be
used together or separately.)
@@ -19469,9 +34445,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/Kconfig linux-3.18.8/kernel/trace/Kconf
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
-diff -Nur linux-3.18.8.orig/kernel/trace/latency_hist.c linux-3.18.8/kernel/trace/latency_hist.c
---- linux-3.18.8.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.8/kernel/trace/latency_hist.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/latency_hist.c linux-3.18.9/kernel/trace/latency_hist.c
+--- linux-3.18.9.orig/kernel/trace/latency_hist.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.9/kernel/trace/latency_hist.c 2015-03-15 16:03:03.860094875 -0500
@@ -0,0 +1,1178 @@
+/*
+ * kernel/trace/latency_hist.c
@@ -20651,9 +35627,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/latency_hist.c linux-3.18.8/kernel/trac
+}
+
+device_initcall(latency_hist_init);
-diff -Nur linux-3.18.8.orig/kernel/trace/Makefile linux-3.18.8/kernel/trace/Makefile
---- linux-3.18.8.orig/kernel/trace/Makefile 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/Makefile 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/Makefile linux-3.18.9/kernel/trace/Makefile
+--- linux-3.18.9.orig/kernel/trace/Makefile 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/Makefile 2015-03-15 16:03:03.860094875 -0500
@@ -36,6 +36,10 @@
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
@@ -20665,9 +35641,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/Makefile linux-3.18.8/kernel/trace/Make
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-diff -Nur linux-3.18.8.orig/kernel/trace/trace.c linux-3.18.8/kernel/trace/trace.c
---- linux-3.18.8.orig/kernel/trace/trace.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/trace.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/trace.c linux-3.18.9/kernel/trace/trace.c
+--- linux-3.18.9.orig/kernel/trace/trace.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/trace.c 2015-03-15 16:03:03.860094875 -0500
@@ -1579,6 +1579,7 @@
struct task_struct *tsk = current;
@@ -20739,9 +35715,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/trace.c linux-3.18.8/kernel/trace/trace
}
void
-diff -Nur linux-3.18.8.orig/kernel/trace/trace_events.c linux-3.18.8/kernel/trace/trace_events.c
---- linux-3.18.8.orig/kernel/trace/trace_events.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/trace_events.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/trace_events.c linux-3.18.9/kernel/trace/trace_events.c
+--- linux-3.18.9.orig/kernel/trace/trace_events.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/trace_events.c 2015-03-15 16:03:03.860094875 -0500
@@ -162,6 +162,8 @@
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
@@ -20751,9 +35727,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/trace_events.c linux-3.18.8/kernel/trac
return ret;
}
-diff -Nur linux-3.18.8.orig/kernel/trace/trace.h linux-3.18.8/kernel/trace/trace.h
---- linux-3.18.8.orig/kernel/trace/trace.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/trace.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/trace.h linux-3.18.9/kernel/trace/trace.h
+--- linux-3.18.9.orig/kernel/trace/trace.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/trace.h 2015-03-15 16:03:03.860094875 -0500
@@ -119,6 +119,7 @@
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
@@ -20770,9 +35746,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/trace.h linux-3.18.8/kernel/trace/trace
};
#define TRACE_BUF_SIZE 1024
-diff -Nur linux-3.18.8.orig/kernel/trace/trace_irqsoff.c linux-3.18.8/kernel/trace/trace_irqsoff.c
---- linux-3.18.8.orig/kernel/trace/trace_irqsoff.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/trace_irqsoff.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/trace_irqsoff.c linux-3.18.9/kernel/trace/trace_irqsoff.c
+--- linux-3.18.9.orig/kernel/trace/trace_irqsoff.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/trace_irqsoff.c 2015-03-15 16:03:03.860094875 -0500
@@ -17,6 +17,7 @@
#include <linux/fs.h>
@@ -20856,9 +35832,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/trace_irqsoff.c linux-3.18.8/kernel/tra
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
-diff -Nur linux-3.18.8.orig/kernel/trace/trace_output.c linux-3.18.8/kernel/trace/trace_output.c
---- linux-3.18.8.orig/kernel/trace/trace_output.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/trace/trace_output.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/trace/trace_output.c linux-3.18.9/kernel/trace/trace_output.c
+--- linux-3.18.9.orig/kernel/trace/trace_output.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/trace/trace_output.c 2015-03-15 16:03:03.860094875 -0500
@@ -410,6 +410,7 @@
{
char hardsoft_irq;
@@ -20905,9 +35881,9 @@ diff -Nur linux-3.18.8.orig/kernel/trace/trace_output.c linux-3.18.8/kernel/trac
return ret;
}
-diff -Nur linux-3.18.8.orig/kernel/user.c linux-3.18.8/kernel/user.c
---- linux-3.18.8.orig/kernel/user.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/user.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/user.c linux-3.18.9/kernel/user.c
+--- linux-3.18.9.orig/kernel/user.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/user.c 2015-03-15 16:03:03.860094875 -0500
@@ -158,11 +158,11 @@
if (!up)
return;
@@ -20922,9 +35898,9 @@ diff -Nur linux-3.18.8.orig/kernel/user.c linux-3.18.8/kernel/user.c
}
struct user_struct *alloc_uid(kuid_t uid)
-diff -Nur linux-3.18.8.orig/kernel/watchdog.c linux-3.18.8/kernel/watchdog.c
---- linux-3.18.8.orig/kernel/watchdog.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/watchdog.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/watchdog.c linux-3.18.9/kernel/watchdog.c
+--- linux-3.18.9.orig/kernel/watchdog.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/watchdog.c 2015-03-15 16:03:03.864094875 -0500
@@ -248,6 +248,8 @@
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -20966,9 +35942,9 @@ diff -Nur linux-3.18.8.orig/kernel/watchdog.c linux-3.18.8/kernel/watchdog.c
/* Enable the perf event */
watchdog_nmi_enable(cpu);
-diff -Nur linux-3.18.8.orig/kernel/workqueue.c linux-3.18.8/kernel/workqueue.c
---- linux-3.18.8.orig/kernel/workqueue.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/workqueue.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/workqueue.c linux-3.18.9/kernel/workqueue.c
+--- linux-3.18.9.orig/kernel/workqueue.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/workqueue.c 2015-03-15 16:03:03.864094875 -0500
@@ -48,6 +48,8 @@
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
@@ -21566,9 +36542,9 @@ diff -Nur linux-3.18.8.orig/kernel/workqueue.c linux-3.18.8/kernel/workqueue.c
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-diff -Nur linux-3.18.8.orig/kernel/workqueue_internal.h linux-3.18.8/kernel/workqueue_internal.h
---- linux-3.18.8.orig/kernel/workqueue_internal.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/kernel/workqueue_internal.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/kernel/workqueue_internal.h linux-3.18.9/kernel/workqueue_internal.h
+--- linux-3.18.9.orig/kernel/workqueue_internal.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/kernel/workqueue_internal.h 2015-03-15 16:03:03.864094875 -0500
@@ -43,6 +43,7 @@
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
@@ -21587,9 +36563,9 @@ diff -Nur linux-3.18.8.orig/kernel/workqueue_internal.h linux-3.18.8/kernel/work
+void wq_worker_sleeping(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
-diff -Nur linux-3.18.8.orig/lib/debugobjects.c linux-3.18.8/lib/debugobjects.c
---- linux-3.18.8.orig/lib/debugobjects.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/debugobjects.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/debugobjects.c linux-3.18.9/lib/debugobjects.c
+--- linux-3.18.9.orig/lib/debugobjects.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/debugobjects.c 2015-03-15 16:03:03.864094875 -0500
@@ -309,7 +309,10 @@
struct debug_obj *obj;
unsigned long flags;
@@ -21602,9 +36578,9 @@ diff -Nur linux-3.18.8.orig/lib/debugobjects.c linux-3.18.8/lib/debugobjects.c
db = get_bucket((unsigned long) addr);
-diff -Nur linux-3.18.8.orig/lib/idr.c linux-3.18.8/lib/idr.c
---- linux-3.18.8.orig/lib/idr.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/idr.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/idr.c linux-3.18.9/lib/idr.c
+--- linux-3.18.9.orig/lib/idr.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/idr.c 2015-03-15 16:03:03.868094875 -0500
@@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
@@ -21670,9 +36646,9 @@ diff -Nur linux-3.18.8.orig/lib/idr.c linux-3.18.8/lib/idr.c
if (!new)
break;
-diff -Nur linux-3.18.8.orig/lib/Kconfig linux-3.18.8/lib/Kconfig
---- linux-3.18.8.orig/lib/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/Kconfig linux-3.18.9/lib/Kconfig
+--- linux-3.18.9.orig/lib/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/Kconfig 2015-03-15 16:03:03.868094875 -0500
@@ -383,6 +383,7 @@
config CPUMASK_OFFSTACK
@@ -21681,9 +36657,9 @@ diff -Nur linux-3.18.8.orig/lib/Kconfig linux-3.18.8/lib/Kconfig
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
-diff -Nur linux-3.18.8.orig/lib/Kconfig.debug linux-3.18.8/lib/Kconfig.debug
---- linux-3.18.8.orig/lib/Kconfig.debug 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/Kconfig.debug 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/Kconfig.debug linux-3.18.9/lib/Kconfig.debug
+--- linux-3.18.9.orig/lib/Kconfig.debug 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/Kconfig.debug 2015-03-15 16:03:03.868094875 -0500
@@ -639,7 +639,7 @@
config DEBUG_SHIRQ
@@ -21693,9 +36669,9 @@ diff -Nur linux-3.18.8.orig/lib/Kconfig.debug linux-3.18.8/lib/Kconfig.debug
help
Enable this to generate a spurious interrupt as soon as a shared
interrupt handler is registered, and just before one is deregistered.
-diff -Nur linux-3.18.8.orig/lib/locking-selftest.c linux-3.18.8/lib/locking-selftest.c
---- linux-3.18.8.orig/lib/locking-selftest.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/locking-selftest.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/locking-selftest.c linux-3.18.9/lib/locking-selftest.c
+--- linux-3.18.9.orig/lib/locking-selftest.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/locking-selftest.c 2015-03-15 16:03:03.868094875 -0500
@@ -1858,6 +1858,7 @@
printk(" --------------------------------------------------------------------------\n");
@@ -21733,9 +36709,9 @@ diff -Nur linux-3.18.8.orig/lib/locking-selftest.c linux-3.18.8/lib/locking-self
ww_tests();
-diff -Nur linux-3.18.8.orig/lib/percpu_ida.c linux-3.18.8/lib/percpu_ida.c
---- linux-3.18.8.orig/lib/percpu_ida.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/percpu_ida.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/percpu_ida.c linux-3.18.9/lib/percpu_ida.c
+--- linux-3.18.9.orig/lib/percpu_ida.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/percpu_ida.c 2015-03-15 16:03:03.868094875 -0500
@@ -29,6 +29,9 @@
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -21824,9 +36800,9 @@ diff -Nur linux-3.18.8.orig/lib/percpu_ida.c linux-3.18.8/lib/percpu_ida.c
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-diff -Nur linux-3.18.8.orig/lib/radix-tree.c linux-3.18.8/lib/radix-tree.c
---- linux-3.18.8.orig/lib/radix-tree.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/radix-tree.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/radix-tree.c linux-3.18.9/lib/radix-tree.c
+--- linux-3.18.9.orig/lib/radix-tree.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/radix-tree.c 2015-03-15 16:03:03.868094875 -0500
@@ -195,12 +195,13 @@
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
@@ -21858,9 +36834,9 @@ diff -Nur linux-3.18.8.orig/lib/radix-tree.c linux-3.18.8/lib/radix-tree.c
/*
* Return the maximum key which can be store into a
-diff -Nur linux-3.18.8.orig/lib/scatterlist.c linux-3.18.8/lib/scatterlist.c
---- linux-3.18.8.orig/lib/scatterlist.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/scatterlist.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/scatterlist.c linux-3.18.9/lib/scatterlist.c
+--- linux-3.18.9.orig/lib/scatterlist.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/scatterlist.c 2015-03-15 16:03:03.868094875 -0500
@@ -592,7 +592,7 @@
flush_kernel_dcache_page(miter->page);
@@ -21888,9 +36864,9 @@ diff -Nur linux-3.18.8.orig/lib/scatterlist.c linux-3.18.8/lib/scatterlist.c
return offset;
}
-diff -Nur linux-3.18.8.orig/lib/smp_processor_id.c linux-3.18.8/lib/smp_processor_id.c
---- linux-3.18.8.orig/lib/smp_processor_id.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/lib/smp_processor_id.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/lib/smp_processor_id.c linux-3.18.9/lib/smp_processor_id.c
+--- linux-3.18.9.orig/lib/smp_processor_id.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/lib/smp_processor_id.c 2015-03-15 16:03:03.868094875 -0500
@@ -39,8 +39,9 @@
if (!printk_ratelimit())
goto out_enable;
@@ -21903,9 +36879,9 @@ diff -Nur linux-3.18.8.orig/lib/smp_processor_id.c linux-3.18.8/lib/smp_processo
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
-diff -Nur linux-3.18.8.orig/mm/filemap.c linux-3.18.8/mm/filemap.c
---- linux-3.18.8.orig/mm/filemap.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/filemap.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/filemap.c linux-3.18.9/mm/filemap.c
+--- linux-3.18.9.orig/mm/filemap.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/filemap.c 2015-03-15 16:03:03.868094875 -0500
@@ -168,7 +168,9 @@
if (!workingset_node_pages(node) &&
list_empty(&node->private_list)) {
@@ -21932,9 +36908,9 @@ diff -Nur linux-3.18.8.orig/mm/filemap.c linux-3.18.8/mm/filemap.c
}
return 0;
}
-diff -Nur linux-3.18.8.orig/mm/highmem.c linux-3.18.8/mm/highmem.c
---- linux-3.18.8.orig/mm/highmem.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/highmem.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/highmem.c linux-3.18.9/mm/highmem.c
+--- linux-3.18.9.orig/mm/highmem.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/highmem.c 2015-03-15 16:03:03.868094875 -0500
@@ -29,10 +29,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
@@ -21959,9 +36935,9 @@ diff -Nur linux-3.18.8.orig/mm/highmem.c linux-3.18.8/mm/highmem.c
unsigned int nr_free_highpages (void)
{
-diff -Nur linux-3.18.8.orig/mm/Kconfig linux-3.18.8/mm/Kconfig
---- linux-3.18.8.orig/mm/Kconfig 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/Kconfig 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/Kconfig linux-3.18.9/mm/Kconfig
+--- linux-3.18.9.orig/mm/Kconfig 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/Kconfig 2015-03-15 16:03:03.872094875 -0500
@@ -408,7 +408,7 @@
config TRANSPARENT_HUGEPAGE
@@ -21971,9 +36947,9 @@ diff -Nur linux-3.18.8.orig/mm/Kconfig linux-3.18.8/mm/Kconfig
select COMPACTION
help
Transparent Hugepages allows the kernel to use huge pages and
-diff -Nur linux-3.18.8.orig/mm/memcontrol.c linux-3.18.8/mm/memcontrol.c
---- linux-3.18.8.orig/mm/memcontrol.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/memcontrol.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/memcontrol.c linux-3.18.9/mm/memcontrol.c
+--- linux-3.18.9.orig/mm/memcontrol.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/memcontrol.c 2015-03-15 16:03:03.872094875 -0500
@@ -60,6 +60,8 @@
#include <net/sock.h>
#include <net/ip.h>
@@ -22074,9 +37050,9 @@ diff -Nur linux-3.18.8.orig/mm/memcontrol.c linux-3.18.8/mm/memcontrol.c
}
static void uncharge_list(struct list_head *page_list)
-diff -Nur linux-3.18.8.orig/mm/memory.c linux-3.18.8/mm/memory.c
---- linux-3.18.8.orig/mm/memory.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/memory.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/memory.c linux-3.18.9/mm/memory.c
+--- linux-3.18.9.orig/mm/memory.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/memory.c 2015-03-15 16:03:03.872094875 -0500
@@ -3258,6 +3258,32 @@
return 0;
}
@@ -22110,9 +37086,9 @@ diff -Nur linux-3.18.8.orig/mm/memory.c linux-3.18.8/mm/memory.c
/*
* By the time we get here, we already hold the mm semaphore
*
-diff -Nur linux-3.18.8.orig/mm/mmu_context.c linux-3.18.8/mm/mmu_context.c
---- linux-3.18.8.orig/mm/mmu_context.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/mmu_context.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/mmu_context.c linux-3.18.9/mm/mmu_context.c
+--- linux-3.18.9.orig/mm/mmu_context.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/mmu_context.c 2015-03-15 16:03:03.872094875 -0500
@@ -23,6 +23,7 @@
struct task_struct *tsk = current;
@@ -22129,9 +37105,9 @@ diff -Nur linux-3.18.8.orig/mm/mmu_context.c linux-3.18.8/mm/mmu_context.c
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
-diff -Nur linux-3.18.8.orig/mm/page_alloc.c linux-3.18.8/mm/page_alloc.c
---- linux-3.18.8.orig/mm/page_alloc.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/page_alloc.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/page_alloc.c linux-3.18.9/mm/page_alloc.c
+--- linux-3.18.9.orig/mm/page_alloc.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/page_alloc.c 2015-03-15 16:03:03.872094875 -0500
@@ -59,6 +59,7 @@
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
@@ -22464,9 +37440,9 @@ diff -Nur linux-3.18.8.orig/mm/page_alloc.c linux-3.18.8/mm/page_alloc.c
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-diff -Nur linux-3.18.8.orig/mm/slab.h linux-3.18.8/mm/slab.h
---- linux-3.18.8.orig/mm/slab.h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/slab.h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/slab.h linux-3.18.9/mm/slab.h
+--- linux-3.18.9.orig/mm/slab.h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/slab.h 2015-03-15 16:03:03.872094875 -0500
@@ -315,7 +315,11 @@
* The slab lists for all objects.
*/
@@ -22479,9 +37455,9 @@ diff -Nur linux-3.18.8.orig/mm/slab.h linux-3.18.8/mm/slab.h
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
-diff -Nur linux-3.18.8.orig/mm/slub.c linux-3.18.8/mm/slub.c
---- linux-3.18.8.orig/mm/slub.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/slub.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/slub.c linux-3.18.9/mm/slub.c
+--- linux-3.18.9.orig/mm/slub.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/slub.c 2015-03-15 16:03:03.876094875 -0500
@@ -1044,7 +1044,7 @@
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -22871,9 +37847,9 @@ diff -Nur linux-3.18.8.orig/mm/slub.c linux-3.18.8/mm/slub.c
}
for (i = 0; i < t.count; i++) {
-diff -Nur linux-3.18.8.orig/mm/swap.c linux-3.18.8/mm/swap.c
---- linux-3.18.8.orig/mm/swap.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/swap.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/swap.c linux-3.18.9/mm/swap.c
+--- linux-3.18.9.orig/mm/swap.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/swap.c 2015-03-15 16:03:03.876094875 -0500
@@ -31,6 +31,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
@@ -22992,9 +37968,9 @@ diff -Nur linux-3.18.8.orig/mm/swap.c linux-3.18.8/mm/swap.c
}
static void lru_add_drain_per_cpu(struct work_struct *dummy)
-diff -Nur linux-3.18.8.orig/mm/truncate.c linux-3.18.8/mm/truncate.c
---- linux-3.18.8.orig/mm/truncate.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/truncate.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/truncate.c linux-3.18.9/mm/truncate.c
+--- linux-3.18.9.orig/mm/truncate.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/truncate.c 2015-03-15 16:03:03.876094875 -0500
@@ -56,8 +56,11 @@
* protected by mapping->tree_lock.
*/
@@ -23009,9 +37985,9 @@ diff -Nur linux-3.18.8.orig/mm/truncate.c linux-3.18.8/mm/truncate.c
__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
spin_unlock_irq(&mapping->tree_lock);
-diff -Nur linux-3.18.8.orig/mm/vmalloc.c linux-3.18.8/mm/vmalloc.c
---- linux-3.18.8.orig/mm/vmalloc.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/vmalloc.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/vmalloc.c linux-3.18.9/mm/vmalloc.c
+--- linux-3.18.9.orig/mm/vmalloc.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/vmalloc.c 2015-03-15 16:03:03.876094875 -0500
@@ -798,7 +798,7 @@
struct vmap_block *vb;
struct vmap_area *va;
@@ -23063,9 +38039,9 @@ diff -Nur linux-3.18.8.orig/mm/vmalloc.c linux-3.18.8/mm/vmalloc.c
rcu_read_unlock();
if (!addr) {
-diff -Nur linux-3.18.8.orig/mm/vmstat.c linux-3.18.8/mm/vmstat.c
---- linux-3.18.8.orig/mm/vmstat.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/vmstat.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/vmstat.c linux-3.18.9/mm/vmstat.c
+--- linux-3.18.9.orig/mm/vmstat.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/vmstat.c 2015-03-15 16:03:03.876094875 -0500
@@ -221,6 +221,7 @@
long x;
long t;
@@ -23114,9 +38090,9 @@ diff -Nur linux-3.18.8.orig/mm/vmstat.c linux-3.18.8/mm/vmstat.c
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-diff -Nur linux-3.18.8.orig/mm/workingset.c linux-3.18.8/mm/workingset.c
---- linux-3.18.8.orig/mm/workingset.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/mm/workingset.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/mm/workingset.c linux-3.18.9/mm/workingset.c
+--- linux-3.18.9.orig/mm/workingset.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/mm/workingset.c 2015-03-15 16:03:03.876094875 -0500
@@ -264,7 +264,8 @@
* point where they would still be useful.
*/
@@ -23184,9 +38160,9 @@ diff -Nur linux-3.18.8.orig/mm/workingset.c linux-3.18.8/mm/workingset.c
err:
return ret;
}
-diff -Nur linux-3.18.8.orig/net/core/dev.c linux-3.18.8/net/core/dev.c
---- linux-3.18.8.orig/net/core/dev.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/core/dev.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/core/dev.c linux-3.18.9/net/core/dev.c
+--- linux-3.18.9.orig/net/core/dev.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/core/dev.c 2015-03-15 16:03:03.880094875 -0500
@@ -182,6 +182,7 @@
static DEFINE_HASHTABLE(napi_hash, 8);
@@ -23483,9 +38459,9 @@ diff -Nur linux-3.18.8.orig/net/core/dev.c linux-3.18.8/net/core/dev.c
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
-diff -Nur linux-3.18.8.orig/net/core/skbuff.c linux-3.18.8/net/core/skbuff.c
---- linux-3.18.8.orig/net/core/skbuff.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/core/skbuff.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/core/skbuff.c linux-3.18.9/net/core/skbuff.c
+--- linux-3.18.9.orig/net/core/skbuff.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/core/skbuff.c 2015-03-15 16:03:03.880094875 -0500
@@ -63,6 +63,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
@@ -23520,9 +38496,9 @@ diff -Nur linux-3.18.8.orig/net/core/skbuff.c linux-3.18.8/net/core/skbuff.c
return data;
}
-diff -Nur linux-3.18.8.orig/net/core/sock.c linux-3.18.8/net/core/sock.c
---- linux-3.18.8.orig/net/core/sock.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/core/sock.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/core/sock.c linux-3.18.9/net/core/sock.c
+--- linux-3.18.9.orig/net/core/sock.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/core/sock.c 2015-03-15 16:03:03.880094875 -0500
@@ -2326,12 +2326,11 @@
if (sk->sk_lock.owned)
__lock_sock(sk);
@@ -23537,9 +38513,9 @@ diff -Nur linux-3.18.8.orig/net/core/sock.c linux-3.18.8/net/core/sock.c
}
EXPORT_SYMBOL(lock_sock_nested);
-diff -Nur linux-3.18.8.orig/net/ipv4/icmp.c linux-3.18.8/net/ipv4/icmp.c
---- linux-3.18.8.orig/net/ipv4/icmp.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/ipv4/icmp.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/ipv4/icmp.c linux-3.18.9/net/ipv4/icmp.c
+--- linux-3.18.9.orig/net/ipv4/icmp.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/ipv4/icmp.c 2015-03-15 16:03:03.880094875 -0500
@@ -69,6 +69,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -23591,9 +38567,9 @@ diff -Nur linux-3.18.8.orig/net/ipv4/icmp.c linux-3.18.8/net/ipv4/icmp.c
}
}
-diff -Nur linux-3.18.8.orig/net/ipv4/ip_output.c linux-3.18.8/net/ipv4/ip_output.c
---- linux-3.18.8.orig/net/ipv4/ip_output.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/ipv4/ip_output.c 2015-03-03 21:30:51.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/ipv4/ip_output.c linux-3.18.9/net/ipv4/ip_output.c
+--- linux-3.18.9.orig/net/ipv4/ip_output.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/ipv4/ip_output.c 2015-03-15 16:03:03.880094875 -0500
@@ -79,6 +79,7 @@
#include <linux/mroute.h>
#include <linux/netlink.h>
@@ -23610,9 +38586,9 @@ diff -Nur linux-3.18.8.orig/net/ipv4/ip_output.c linux-3.18.8/net/ipv4/ip_output
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
-diff -Nur linux-3.18.8.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.8/net/ipv4/sysctl_net_ipv4.c
---- linux-3.18.8.orig/net/ipv4/sysctl_net_ipv4.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/ipv4/sysctl_net_ipv4.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.9/net/ipv4/sysctl_net_ipv4.c
+--- linux-3.18.9.orig/net/ipv4/sysctl_net_ipv4.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/ipv4/sysctl_net_ipv4.c 2015-03-15 16:03:03.880094875 -0500
@@ -779,6 +779,13 @@
.proc_handler = proc_dointvec
},
@@ -23627,9 +38603,9 @@ diff -Nur linux-3.18.8.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.8/net/ipv4/sys
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
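The seven lines added to sysctl_net_ipv4.c (6 -> 13 in the hunk header) are
consistent with the icmp_echo_sysrq knob the -rt series registers just
before icmp_ignore_bogus_error_responses: it lets a specially crafted ICMP
echo trigger SysRq on an otherwise wedged machine. The entry presumably
takes the usual sysctl-table shape (reconstructed, not quoted from the
elided lines):

	{
		.procname	= "icmp_echo_sysrq",
		.data		= &init_net.ipv4.sysctl_icmp_echo_sysrq,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},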
-diff -Nur linux-3.18.8.orig/net/mac80211/rx.c linux-3.18.8/net/mac80211/rx.c
---- linux-3.18.8.orig/net/mac80211/rx.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/mac80211/rx.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/mac80211/rx.c linux-3.18.9/net/mac80211/rx.c
+--- linux-3.18.9.orig/net/mac80211/rx.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/mac80211/rx.c 2015-03-15 16:03:03.884094875 -0500
@@ -3356,7 +3356,7 @@
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -23639,9 +38615,9 @@ diff -Nur linux-3.18.8.orig/net/mac80211/rx.c linux-3.18.8/net/mac80211/rx.c
if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
goto drop;
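The one-line mac80211 change fits the -rt convention of relaxing softirq
assertions: on PREEMPT_RT_FULL softirqs run in thread context, so a check
like WARN_ON_ONCE(softirq_count() == 0) in the RX path would fire
spuriously. The -rt series provides _NONRT variants that compile to the
plain assertion on mainline and to a no-op on RT:

	/* before: */
	WARN_ON_ONCE(softirq_count() == 0);
	/* after:  */
	WARN_ON_ONCE_NONRT(softirq_count() == 0);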
-diff -Nur linux-3.18.8.orig/net/netfilter/core.c linux-3.18.8/net/netfilter/core.c
---- linux-3.18.8.orig/net/netfilter/core.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/netfilter/core.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/netfilter/core.c linux-3.18.9/net/netfilter/core.c
+--- linux-3.18.9.orig/net/netfilter/core.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/netfilter/core.c 2015-03-15 16:03:03.884094875 -0500
@@ -21,11 +21,17 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
@@ -23660,9 +38636,9 @@ diff -Nur linux-3.18.8.orig/net/netfilter/core.c linux-3.18.8/net/netfilter/core
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
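The netfilter/core.c hunk adds the per-CPU lock that backs
xt_write_recseq_begin()/end() on RT: mainline relies on disabled preemption
while the xtables counter sequence is updated, which RT cannot guarantee.
Sketch of the added definitions as the -rt series spells them (treat the
exact lines as an assumption; the hunk body is elided here):

	#include <linux/locallock.h>

	#ifdef CONFIG_PREEMPT_RT_BASE
	DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
	EXPORT_PER_CPU_SYMBOL(xt_write_lock);
	#endif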
-diff -Nur linux-3.18.8.orig/net/packet/af_packet.c linux-3.18.8/net/packet/af_packet.c
---- linux-3.18.8.orig/net/packet/af_packet.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/packet/af_packet.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/packet/af_packet.c linux-3.18.9/net/packet/af_packet.c
+--- linux-3.18.9.orig/net/packet/af_packet.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/packet/af_packet.c 2015-03-15 16:03:03.884094875 -0500
@@ -63,6 +63,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
@@ -23689,9 +38665,9 @@ diff -Nur linux-3.18.8.orig/net/packet/af_packet.c linux-3.18.8/net/packet/af_pa
}
}
prb_close_block(pkc, pbd, po, status);
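In af_packet.c the busy-wait loops that spin until a TPACKET block is no
longer being filled are converted from cpu_relax() to cpu_chill(): on RT a
tight spin can livelock against a preempted writer of equal or higher
priority, so cpu_chill() sleeps briefly instead. Sketch around the mainline
loop:

	/* before: */
	while (atomic_read(&pkc->blk_fill_in_prog))
		cpu_relax();

	/* -rt: */
	while (atomic_read(&pkc->blk_fill_in_prog))
		cpu_chill();	/* brief sleep; spinning can livelock on RT */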
-diff -Nur linux-3.18.8.orig/net/rds/ib_rdma.c linux-3.18.8/net/rds/ib_rdma.c
---- linux-3.18.8.orig/net/rds/ib_rdma.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/rds/ib_rdma.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/rds/ib_rdma.c linux-3.18.9/net/rds/ib_rdma.c
+--- linux-3.18.9.orig/net/rds/ib_rdma.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/rds/ib_rdma.c 2015-03-15 16:03:03.884094875 -0500
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
@@ -23709,9 +38685,9 @@ diff -Nur linux-3.18.8.orig/net/rds/ib_rdma.c linux-3.18.8/net/rds/ib_rdma.c
}
}
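The rds/ib_rdma.c change is the same cpu_relax() -> cpu_chill() conversion
as in af_packet.c above, applied to the loop that waits for the clean-list
busy flag to clear while the MR pool is flushed; see the sketch after the
af_packet.c section.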
-diff -Nur linux-3.18.8.orig/net/sched/sch_generic.c linux-3.18.8/net/sched/sch_generic.c
---- linux-3.18.8.orig/net/sched/sch_generic.c 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/net/sched/sch_generic.c 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/net/sched/sch_generic.c linux-3.18.9/net/sched/sch_generic.c
+--- linux-3.18.9.orig/net/sched/sch_generic.c 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/net/sched/sch_generic.c 2015-03-15 16:03:03.884094875 -0500
@@ -894,7 +894,7 @@
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
@@ -23721,9 +38697,9 @@ diff -Nur linux-3.18.8.orig/net/sched/sch_generic.c linux-3.18.8/net/sched/sch_g
}
void dev_deactivate(struct net_device *dev)
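dev_deactivate_many() waits for every queue's qdisc to go idle; mainline
does that with yield(), which on RT lets a SCHED_FIFO caller spin forever
above the softirq thread it is waiting on. The -rt series sleeps instead
(some_qdisc_is_busy() is the mainline helper this loop already uses):

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list)
		while (some_qdisc_is_busy(dev))
			msleep(1);	/* was: yield() */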
-diff -Nur linux-3.18.8.orig/scripts/mkcompile_h linux-3.18.8/scripts/mkcompile_h
---- linux-3.18.8.orig/scripts/mkcompile_h 2015-02-27 02:49:36.000000000 +0100
-+++ linux-3.18.8/scripts/mkcompile_h 2015-03-03 08:05:18.000000000 +0100
+diff -Nur linux-3.18.9.orig/scripts/mkcompile_h linux-3.18.9/scripts/mkcompile_h
+--- linux-3.18.9.orig/scripts/mkcompile_h 2015-03-06 16:53:42.000000000 -0600
++++ linux-3.18.9/scripts/mkcompile_h 2015-03-15 16:03:03.884094875 -0500
@@ -4,7 +4,8 @@
ARCH=$2
SMP=$3