-rw-r--r--  mk/kernel-ver.mk                                       4
-rw-r--r--  target/config/Config.in.kernelversion                  2
-rw-r--r--  target/linux/patches/4.9.77/alpha-remove-coff.patch (renamed from target/linux/patches/4.9.71/alpha-remove-coff.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/crisv32_ethernet_driver.patch (renamed from target/linux/patches/4.9.71/crisv32_ethernet_driver.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/h8300.patch (renamed from target/linux/patches/4.9.71/h8300.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/initramfs-nosizelimit.patch (renamed from target/linux/patches/4.9.71/initramfs-nosizelimit.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/ld-or1k.patch (renamed from target/linux/patches/4.9.71/ld-or1k.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/macsonic.patch (renamed from target/linux/patches/4.9.71/macsonic.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/microblaze-sigaltstack.patch (renamed from target/linux/patches/4.9.71/microblaze-sigaltstack.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/mips64r6-multi3.patch (renamed from target/linux/patches/4.9.71/mips64r6-multi3.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/patch-realtime (renamed from target/linux/patches/4.9.71/patch-realtime)  3302
-rw-r--r--  target/linux/patches/4.9.77/sh2.patch (renamed from target/linux/patches/4.9.71/sh2.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/startup.patch (renamed from target/linux/patches/4.9.71/startup.patch)  0
-rw-r--r--  target/linux/patches/4.9.77/vdso2.patch (renamed from target/linux/patches/4.9.71/vdso2.patch)  0
15 files changed, 2020 insertions, 1288 deletions
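
Side note: the KERNEL_HASH change below is the sha256 of the upstream linux-4.9.77 tarball. A minimal shell sketch of re-checking it by hand, assuming the cdn.kernel.org URL layout and the .tar.xz tarball name (neither is spelled out in this commit):

    # Download the 4.9.77 tarball and compare it against the new
    # KERNEL_HASH from mk/kernel-ver.mk (URL and file name are assumptions).
    curl -LO https://cdn.kernel.org/pub/linux/kernel/v4.x/linux-4.9.77.tar.xz
    echo "7c29bc3fb96f1e23d98f664e786dddd53a1599f56431b9b7fdfba402a4b3705c  linux-4.9.77.tar.xz" | sha256sum -c -
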
diff --git a/mk/kernel-ver.mk b/mk/kernel-ver.mk
index 011f0baf3..42d5c0490 100644
--- a/mk/kernel-ver.mk
+++ b/mk/kernel-ver.mk
@@ -22,10 +22,10 @@ KERNEL_VERSION:= $(KERNEL_FILE_VER)-$(KERNEL_RELEASE)
KERNEL_HASH:= 6ebcc57ba31d714af872347184d1de32f4ab0b7096ef4e062d1ca6b3234d9333
endif
ifeq ($(ADK_TARGET_KERNEL_VERSION_4_9),y)
-KERNEL_FILE_VER:= 4.9.71
+KERNEL_FILE_VER:= 4.9.77
KERNEL_RELEASE:= 1
KERNEL_VERSION:= $(KERNEL_FILE_VER)-$(KERNEL_RELEASE)
-KERNEL_HASH:= f1fd9740fb4ec31180113e99a2329214ae441533ee226075a40d4e0675db769c
+KERNEL_HASH:= 7c29bc3fb96f1e23d98f664e786dddd53a1599f56431b9b7fdfba402a4b3705c
endif
ifeq ($(ADK_TARGET_KERNEL_VERSION_4_4),y)
KERNEL_FILE_VER:= 4.4.107
diff --git a/target/config/Config.in.kernelversion b/target/config/Config.in.kernelversion
index ec2757683..85620f5e0 100644
--- a/target/config/Config.in.kernelversion
+++ b/target/config/Config.in.kernelversion
@@ -46,7 +46,7 @@ config ADK_TARGET_KERNEL_VERSION_4_14
depends on !ADK_TARGET_SYSTEM_QEMU_ARM_REALVIEW_EB_MPCORE
config ADK_TARGET_KERNEL_VERSION_4_9
- bool "4.9.71"
+ bool "4.9.77"
depends on !ADK_TARGET_ARCH_CRIS
depends on !ADK_TARGET_ARCH_CSKY
depends on !ADK_TARGET_ARCH_METAG
diff --git a/target/linux/patches/4.9.71/alpha-remove-coff.patch b/target/linux/patches/4.9.77/alpha-remove-coff.patch
index 176db3c88..176db3c88 100644
--- a/target/linux/patches/4.9.71/alpha-remove-coff.patch
+++ b/target/linux/patches/4.9.77/alpha-remove-coff.patch
diff --git a/target/linux/patches/4.9.71/crisv32_ethernet_driver.patch b/target/linux/patches/4.9.77/crisv32_ethernet_driver.patch
index 0cef202fc..0cef202fc 100644
--- a/target/linux/patches/4.9.71/crisv32_ethernet_driver.patch
+++ b/target/linux/patches/4.9.77/crisv32_ethernet_driver.patch
diff --git a/target/linux/patches/4.9.71/h8300.patch b/target/linux/patches/4.9.77/h8300.patch
index c71194f0a..c71194f0a 100644
--- a/target/linux/patches/4.9.71/h8300.patch
+++ b/target/linux/patches/4.9.77/h8300.patch
diff --git a/target/linux/patches/4.9.71/initramfs-nosizelimit.patch b/target/linux/patches/4.9.77/initramfs-nosizelimit.patch
index 40d2f6bd8..40d2f6bd8 100644
--- a/target/linux/patches/4.9.71/initramfs-nosizelimit.patch
+++ b/target/linux/patches/4.9.77/initramfs-nosizelimit.patch
diff --git a/target/linux/patches/4.9.71/ld-or1k.patch b/target/linux/patches/4.9.77/ld-or1k.patch
index 264f9166f..264f9166f 100644
--- a/target/linux/patches/4.9.71/ld-or1k.patch
+++ b/target/linux/patches/4.9.77/ld-or1k.patch
diff --git a/target/linux/patches/4.9.71/macsonic.patch b/target/linux/patches/4.9.77/macsonic.patch
index 75a6fcad2..75a6fcad2 100644
--- a/target/linux/patches/4.9.71/macsonic.patch
+++ b/target/linux/patches/4.9.77/macsonic.patch
diff --git a/target/linux/patches/4.9.71/microblaze-sigaltstack.patch b/target/linux/patches/4.9.77/microblaze-sigaltstack.patch
index c4064e8b9..c4064e8b9 100644
--- a/target/linux/patches/4.9.71/microblaze-sigaltstack.patch
+++ b/target/linux/patches/4.9.77/microblaze-sigaltstack.patch
diff --git a/target/linux/patches/4.9.71/mips64r6-multi3.patch b/target/linux/patches/4.9.77/mips64r6-multi3.patch
index 771febe29..771febe29 100644
--- a/target/linux/patches/4.9.71/mips64r6-multi3.patch
+++ b/target/linux/patches/4.9.77/mips64r6-multi3.patch
diff --git a/target/linux/patches/4.9.71/or1k-more-ram.patch b/target/linux/patches/4.9.77/or1k-more-ram.patch
index de848c838..de848c838 100644
--- a/target/linux/patches/4.9.71/or1k-more-ram.patch
+++ b/target/linux/patches/4.9.77/or1k-more-ram.patch
diff --git a/target/linux/patches/4.9.71/patch-realtime b/target/linux/patches/4.9.77/patch-realtime
index 7abba16fe..7bb72e14d 100644
--- a/target/linux/patches/4.9.71/patch-realtime
+++ b/target/linux/patches/4.9.77/patch-realtime
@@ -378,7 +378,7 @@ diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 9f157e7c51e7..468e224d76aa 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
-@@ -220,11 +220,18 @@ ENDPROC(__dabt_svc)
+@@ -220,11 +220,18 @@ __irq_svc:
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -399,7 +399,7 @@ index 9f157e7c51e7..468e224d76aa 100644
#endif
svc_exit r5, irq = 1 @ return from exception
-@@ -239,8 +246,14 @@ ENDPROC(__irq_svc)
+@@ -239,8 +246,14 @@ svc_preempt:
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
@@ -419,7 +419,7 @@ diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 10c3283d6c19..8872937862cc 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
-@@ -36,7 +36,9 @@
+@@ -36,7 +36,9 @@ ret_fast_syscall:
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
@@ -430,7 +430,7 @@ index 10c3283d6c19..8872937862cc 100644
bne fast_work_pending
/* perform architecture specific actions before user return */
-@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
+@@ -62,8 +64,11 @@ ret_fast_syscall:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
@@ -983,10 +983,10 @@ index ea5a2277ee46..b988e081ac79 100644
return pen_release != -1 ? -ENOSYS : 0;
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 0122ad1a6027..926b1be48043 100644
+index f7861dc83182..ce47dfe25fb0 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+@@ -433,6 +433,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -996,7 +996,7 @@ index 0122ad1a6027..926b1be48043 100644
if (user_mode(regs))
goto bad_area;
-@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+@@ -500,6 +503,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -1255,10 +1255,10 @@ index c58ddf8c4062..a8f2f7c1fe12 100644
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
-index 79b0fe24d5b7..f3c959ade308 100644
+index b4c7db434654..433d846f4f51 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -428,11 +428,16 @@ ENDPROC(el1_sync)
+@@ -430,11 +430,16 @@ el1_irq:
#ifdef CONFIG_PREEMPT
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
@@ -1278,7 +1278,7 @@ index 79b0fe24d5b7..f3c959ade308 100644
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -446,6 +451,7 @@ ENDPROC(el1_irq)
+@@ -448,6 +453,7 @@ el1_preempt:
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@@ -1313,7 +1313,7 @@ index 5e844f68e847..dc613cc10f54 100644
config CPU_SUPPORTS_HIGHMEM
bool
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 8f01f21e78f1..619485b777d2 100644
+index 6eda5abbd719..601e27701a4a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT
@@ -1412,7 +1412,7 @@ diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3841d749a430..6dbaeff192b9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -835,7 +835,14 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+@@ -835,7 +835,14 @@ resume_kernel:
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -1427,7 +1427,7 @@ index 3841d749a430..6dbaeff192b9 100644
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -846,11 +853,11 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+@@ -846,11 +853,11 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
@@ -1442,7 +1442,7 @@ index 3841d749a430..6dbaeff192b9 100644
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
+@@ -1171,7 +1178,7 @@ global_dbcr0:
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -1451,7 +1451,7 @@ index 3841d749a430..6dbaeff192b9 100644
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1192,7 +1199,7 @@ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1192,7 +1199,7 @@ recheck:
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
@@ -1461,7 +1461,7 @@ index 3841d749a430..6dbaeff192b9 100644
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
-index 767ef6d68c9e..2cb4d5552319 100644
+index caa659671599..891080c4a41e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite)
@@ -1473,29 +1473,27 @@ index 767ef6d68c9e..2cb4d5552319 100644
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -718,10 +718,18 @@ _GLOBAL(ret_from_except_lite)
+@@ -718,10 +718,18 @@ resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
-- andi. r0,r4,_TIF_NEED_RESCHED
-- beq+ restore
-- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
++ lwz r8,TI_PREEMPT(r9)
+ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
+ bne restore
-+ andi. r0,r4,_TIF_NEED_RESCHED
+ andi. r0,r4,_TIF_NEED_RESCHED
+ bne+ check_count
+
+ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
-+ beq+ restore
+ beq+ restore
+ lwz r8,TI_PREEMPT_LAZY(r9)
+
-+ /* Check that preempt_count() == 0 and interrupts are enabled */
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
+check_count:
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -738,7 +746,7 @@ _GLOBAL(ret_from_except_lite)
+@@ -738,7 +746,7 @@ resume_kernel:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
@@ -1505,10 +1503,10 @@ index 767ef6d68c9e..2cb4d5552319 100644
/*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 3c05c311e35e..f83f6ac1274d 100644
+index 028a22bfa90c..a75e2dd3e71f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
-@@ -638,6 +638,7 @@ void irq_ctx_init(void)
+@@ -651,6 +651,7 @@ void irq_ctx_init(void)
}
}
@@ -1516,7 +1514,7 @@ index 3c05c311e35e..f83f6ac1274d 100644
void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
-@@ -655,6 +656,7 @@ void do_softirq_own_stack(void)
+@@ -668,6 +669,7 @@ void do_softirq_own_stack(void)
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
@@ -1610,7 +1608,7 @@ index 6c0378c0b8b5..abd58b4dff97 100644
static inline void handle_one_irq(unsigned int irq)
{
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
-index 165ecdd24d22..b68a464a22be 100644
+index 8b4152f3a764..c5cca159692a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -194,12 +194,10 @@ config NR_CPUS
@@ -1629,7 +1627,7 @@ index 165ecdd24d22..b68a464a22be 100644
config GENERIC_HWEIGHT
bool
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
-index 34a7930b76ef..773740521008 100644
+index 5cbf03c14981..6067d9379e5b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
@@ -1649,7 +1647,7 @@ index 34a7930b76ef..773740521008 100644
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index bada636d1065..f8a995c90c01 100644
+index da8156fd3d58..d8cd3bc807fc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -17,6 +17,7 @@ config X86_64
@@ -2042,10 +2040,10 @@ index edba8606b99a..4a3389535fc6 100644
jz restore_all
call preempt_schedule_irq
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index ef766a358b37..28401f826ab1 100644
+index af4e58132d91..22803e2f7495 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -546,7 +546,23 @@ GLOBAL(retint_user)
+@@ -575,7 +575,23 @@ retint_kernel:
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -2069,7 +2067,7 @@ index ef766a358b37..28401f826ab1 100644
call preempt_schedule_irq
jmp 0b
1:
-@@ -894,6 +910,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -925,6 +941,7 @@ bad_gs:
jmp 2b
.previous
@@ -2077,7 +2075,7 @@ index ef766a358b37..28401f826ab1 100644
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -906,6 +923,7 @@ ENTRY(do_softirq_own_stack)
+@@ -937,6 +954,7 @@ ENTRY(do_softirq_own_stack)
decl PER_CPU_VAR(irq_count)
ret
END(do_softirq_own_stack)
@@ -2283,7 +2281,7 @@ index 57ab86d94d64..35d25e27180f 100644
}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index 931ced8ca345..167975ac8af7 100644
+index 11cc600f4df0..8cbfc51ce339 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -2297,7 +2295,7 @@ index 931ced8ca345..167975ac8af7 100644
/* --------------------------------------------------------------------------
Boot-time Configuration
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index 7249f1500bcb..a79d5c224004 100644
+index cf89928dbd46..18b5ec2a71df 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
@@ -2329,7 +2327,7 @@ index c62e015b126c..0cc71257fca6 100644
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 22cda29d654e..57c85e3af092 100644
+index 8ca5f8ad008e..edcbd18b3189 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -41,6 +41,8 @@
@@ -2341,7 +2339,7 @@ index 22cda29d654e..57c85e3af092 100644
#include <linux/jump_label.h>
#include <asm/processor.h>
-@@ -1307,7 +1309,7 @@ void mce_log_therm_throt_event(__u64 status)
+@@ -1306,7 +1308,7 @@ void mce_log_therm_throt_event(__u64 status)
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -2350,7 +2348,7 @@ index 22cda29d654e..57c85e3af092 100644
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1316,32 +1318,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
+@@ -1315,32 +1317,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -2389,7 +2387,7 @@ index 22cda29d654e..57c85e3af092 100644
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1364,7 +1352,7 @@ static void mce_timer_fn(unsigned long data)
+@@ -1363,7 +1351,7 @@ static void mce_timer_fn(unsigned long data)
done:
__this_cpu_write(mce_next_interval, iv);
@@ -2398,7 +2396,7 @@ index 22cda29d654e..57c85e3af092 100644
}
/*
-@@ -1372,7 +1360,7 @@ static void mce_timer_fn(unsigned long data)
+@@ -1371,7 +1359,7 @@ static void mce_timer_fn(unsigned long data)
*/
void mce_timer_kick(unsigned long interval)
{
@@ -2407,7 +2405,7 @@ index 22cda29d654e..57c85e3af092 100644
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1387,7 +1375,7 @@ static void mce_timer_delete_all(void)
+@@ -1386,7 +1374,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -2416,7 +2414,7 @@ index 22cda29d654e..57c85e3af092 100644
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1397,6 +1385,56 @@ static void mce_do_trigger(struct work_struct *work)
+@@ -1396,6 +1384,56 @@ static void mce_do_trigger(struct work_struct *work)
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -2473,7 +2471,7 @@ index 22cda29d654e..57c85e3af092 100644
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1404,19 +1442,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+@@ -1403,19 +1441,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
*/
int mce_notify_irq(void)
{
@@ -2494,7 +2492,7 @@ index 22cda29d654e..57c85e3af092 100644
return 1;
}
return 0;
-@@ -1722,7 +1749,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
+@@ -1721,7 +1748,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
}
}
@@ -2503,7 +2501,7 @@ index 22cda29d654e..57c85e3af092 100644
{
unsigned long iv = check_interval * HZ;
-@@ -1731,16 +1758,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+@@ -1730,16 +1757,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
per_cpu(mce_next_interval, cpu) = iv;
@@ -2525,7 +2523,7 @@ index 22cda29d654e..57c85e3af092 100644
mce_start_timer(cpu, t);
}
-@@ -2465,6 +2493,8 @@ static void mce_disable_cpu(void *h)
+@@ -2464,6 +2492,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -2534,7 +2532,7 @@ index 22cda29d654e..57c85e3af092 100644
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
-@@ -2487,6 +2517,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2486,6 +2516,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(msr_ops.ctl(i), b->ctl);
}
@@ -2542,7 +2540,7 @@ index 22cda29d654e..57c85e3af092 100644
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2494,7 +2525,6 @@ static int
+@@ -2493,7 +2524,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -2550,7 +2548,7 @@ index 22cda29d654e..57c85e3af092 100644
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2514,11 +2544,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+@@ -2513,11 +2543,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
@@ -2562,7 +2560,7 @@ index 22cda29d654e..57c85e3af092 100644
break;
}
-@@ -2557,6 +2585,10 @@ static __init int mcheck_init_device(void)
+@@ -2556,6 +2584,10 @@ static __init int mcheck_init_device(void)
goto err_out;
}
@@ -2651,10 +2649,10 @@ index bd7be8efdc4c..b3b0a7f7b1ca 100644
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 3f05c044720b..fe68afd37162 100644
+index b24b3c6d686e..02a062b0de5d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
+@@ -1944,6 +1944,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
@@ -2663,10 +2661,10 @@ index 3f05c044720b..fe68afd37162 100644
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 81bba3c2137d..fcb84512e85d 100644
+index 73304b1a03cc..2a0fae2ef089 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5958,6 +5958,13 @@ int kvm_arch_init(void *opaque)
+@@ -5967,6 +5967,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
@@ -2765,10 +2763,10 @@ index ada98b39b8ad..585f6829653b 100644
kmap_atomic_idx_pop();
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index e3353c97d086..01664968555c 100644
+index 73dcb0e18c1b..c1085c7ee212 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
-@@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
+@@ -215,7 +215,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
int in_flags, struct page **pages)
{
unsigned int i, level;
@@ -2785,7 +2783,7 @@ index e3353c97d086..01664968555c 100644
BUG_ON(irqs_disabled());
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
-index 9e42842e924a..5398f97172f9 100644
+index 0f0175186f1b..39b5d5b2627d 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
@@ -2861,7 +2859,7 @@ index 9e42842e924a..5398f97172f9 100644
return -1;
}
-@@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables(void)
+@@ -1939,9 +1939,9 @@ static void __init init_per_cpu_tunables(void)
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
@@ -2959,7 +2957,7 @@ index b333fc45f9ec..8b85916e6986 100644
/*
diff --git a/block/blk-core.c b/block/blk-core.c
-index d1f2801ce836..6f945bb0fa1a 100644
+index 23daf40be371..e8341f78f119 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
@@ -2976,12 +2974,12 @@ index d1f2801ce836..6f945bb0fa1a 100644
**/
void blk_start_queue(struct request_queue *q)
{
-- WARN_ON(!irqs_disabled());
-+ WARN_ON_NONRT(!irqs_disabled());
+- WARN_ON(!in_interrupt() && !irqs_disabled());
++ WARN_ON_NONRT(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
-@@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
+@@ -660,7 +663,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
if (nowait)
return -EBUSY;
@@ -2990,7 +2988,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
!atomic_read(&q->mq_freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
-@@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+@@ -680,7 +683,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
@@ -2999,7 +2997,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
}
static void blk_rq_timed_out_timer(unsigned long data)
-@@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+@@ -750,7 +753,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -3008,7 +3006,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3200,7 +3203,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
+@@ -3202,7 +3205,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -3017,7 +3015,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3248,7 +3251,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3250,7 +3253,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -3025,7 +3023,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3268,11 +3270,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -3270,11 +3272,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
q = NULL;
depth = 0;
@@ -3037,7 +3035,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3285,7 +3282,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -3287,7 +3284,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -3046,7 +3044,7 @@ index d1f2801ce836..6f945bb0fa1a 100644
}
/*
-@@ -3312,8 +3309,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+@@ -3314,8 +3311,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
*/
if (q)
queue_unplugged(q, depth, from_schedule);
@@ -3086,7 +3084,7 @@ index 381cb50a673c..dc8785233d94 100644
}
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 7b597ec4e9c5..48c9652a701c 100644
+index 10f8f94b7f20..82500641f37b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -3198,10 +3196,10 @@ index 7b597ec4e9c5..48c9652a701c 100644
kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
diff --git a/block/blk-mq.h b/block/blk-mq.h
-index e5d25249028c..1e846b842eab 100644
+index c55bcf67b956..c26a84d44cc4 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
-@@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+@@ -73,12 +73,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
@@ -3409,7 +3407,7 @@ index 15073375bd00..357e7ca5a587 100644
/* Delete the reader/writer lock */
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
-index 051b6158d1b7..7ad293bef6ed 100644
+index 8d22acdf90f0..64fbad747da9 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
@@ -3461,17 +3459,17 @@ index 051b6158d1b7..7ad293bef6ed 100644
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
-index 4b5cd3a7b2b6..fa8329ad79fd 100644
+index 4b5cd3a7b2b6..8c93ee150ee8 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
-@@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
+@@ -118,12 +118,20 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- return *get_cpu_ptr(comp->stream);
+ struct zcomp_strm *zstrm;
+
-+ zstrm = *this_cpu_ptr(comp->stream);
++ zstrm = *get_local_ptr(comp->stream);
+ spin_lock(&zstrm->zcomp_lock);
+ return zstrm;
}
@@ -3483,10 +3481,11 @@ index 4b5cd3a7b2b6..fa8329ad79fd 100644
+
+ zstrm = *this_cpu_ptr(comp->stream);
+ spin_unlock(&zstrm->zcomp_lock);
++ put_local_ptr(zstrm);
}
int zcomp_compress(struct zcomp_strm *zstrm,
-@@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
+@@ -174,6 +182,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
pr_err("Can't allocate a compression stream\n");
return NOTIFY_BAD;
}
@@ -3507,7 +3506,7 @@ index 478cac2ed465..f7a6efdc3285 100644
/* dynamic per-device compression frontend */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
-index c9914d653968..2038d138f286 100644
+index b7c0b69a02f5..47d033b8a966 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
@@ -3793,6 +3792,60 @@ index 08d1dd58c0d2..25ee319dc8e3 100644
return ret;
}
#endif
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 8022bea27fed..247330efd310 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
+ return container_of(data, struct tpm_tis_tcg_phy, priv);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Flushes previous write operations to chip so that a subsequent
++ * ioread*()s won't stall a cpu.
++ */
++static inline void tpm_tis_flush(void __iomem *iobase)
++{
++ ioread8(iobase + TPM_ACCESS(0));
++}
++#else
++#define tpm_tis_flush(iobase) do { } while (0)
++#endif
++
++static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
++{
++ iowrite8(b, iobase + addr);
++ tpm_tis_flush(iobase);
++}
++
++static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
++{
++ iowrite32(b, iobase + addr);
++ tpm_tis_flush(iobase);
++}
++
+ static bool interrupts = true;
+ module_param(interrupts, bool, 0444);
+ MODULE_PARM_DESC(interrupts, "Enable interrupts");
+@@ -103,7 +128,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ while (len--)
+- iowrite8(*value++, phy->iobase + addr);
++ tpm_tis_iowrite8(*value++, phy->iobase, addr);
+ return 0;
+ }
+
+@@ -127,7 +152,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- iowrite32(value, phy->iobase + addr);
++ tpm_tis_iowrite32(value, phy->iobase, addr);
+ return 0;
+ }
+
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 4da2af9694a2..5b6f57f500b8 100644
--- a/drivers/clocksource/tcb_clksrc.c
@@ -4191,10 +4244,10 @@ index 02908e37c228..05c0480576e1 100644
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 5dc6082639db..c32458fb3be2 100644
+index ce32303b3013..c0a53bf2e952 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12131,7 +12131,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
+@@ -12138,7 +12138,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_flip_work *work;
@@ -4204,7 +4257,7 @@ index 5dc6082639db..c32458fb3be2 100644
if (crtc == NULL)
return;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
-index dbed12c484c9..5c540b78e8b5 100644
+index 64f4e2e18594..aebf1e9eabcb 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -35,6 +35,7 @@
@@ -4224,7 +4277,7 @@ index dbed12c484c9..5c540b78e8b5 100644
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
-@@ -95,7 +98,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
+@@ -98,7 +101,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
max = vblank_start - 1;
@@ -4233,7 +4286,7 @@ index dbed12c484c9..5c540b78e8b5 100644
if (min <= 0 || max <= 0)
return;
-@@ -125,11 +128,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
+@@ -128,11 +131,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
break;
}
@@ -4247,7 +4300,7 @@ index dbed12c484c9..5c540b78e8b5 100644
}
finish_wait(wq, &wait);
-@@ -181,7 +184,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
+@@ -202,7 +205,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
crtc->base.state->event = NULL;
}
@@ -4547,7 +4600,7 @@ index 4a2a9e370be7..e970d9afd179 100644
if (t2 - t1 < tx) tx = t2 - t1;
}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index 11a13b5be73a..baaed0ac274b 100644
+index 0c910a863581..3408e5dd1b93 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_dev_data *dev_data,
@@ -4578,8 +4631,26 @@ index 11a13b5be73a..baaed0ac274b 100644
if (WARN_ON(!dev_data->domain))
return;
+@@ -2283,7 +2283,7 @@ static void queue_add(struct dma_ops_domain *dma_dom,
+ pages = __roundup_pow_of_two(pages);
+ address >>= PAGE_SHIFT;
+
+- queue = get_cpu_ptr(&flush_queue);
++ queue = raw_cpu_ptr(&flush_queue);
+ spin_lock_irqsave(&queue->lock, flags);
+
+ if (queue->next == FLUSH_QUEUE_SIZE)
+@@ -2300,8 +2300,6 @@ static void queue_add(struct dma_ops_domain *dma_dom,
+
+ if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
+ mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
+-
+- put_cpu_ptr(&flush_queue);
+ }
+
+
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
-index 87fcbf71b85a..674c82b61f36 100644
+index 88bbc8ccc5e3..8a1a8432a6bd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -479,7 +479,7 @@ struct deferred_flush_data {
@@ -4591,7 +4662,7 @@ index 87fcbf71b85a..674c82b61f36 100644
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-@@ -3719,10 +3719,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+@@ -3721,10 +3721,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
struct intel_iommu *iommu;
struct deferred_flush_entry *entry;
struct deferred_flush_data *flush_data;
@@ -4603,7 +4674,7 @@ index 87fcbf71b85a..674c82b61f36 100644
/* Flush all CPUs' entries to avoid deferring too much. If
* this becomes a bottleneck, can just flush us, and rely on
-@@ -3755,8 +3753,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+@@ -3757,8 +3755,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
}
flush_data->size++;
spin_unlock_irqrestore(&flush_data->lock, flags);
@@ -4708,10 +4779,28 @@ index ba7c4c685db3..834ec328f217 100644
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index f34ad2be66a1..123469f7b560 100644
+index 475a7a1bcfe0..8d2c9d70042e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+@@ -429,7 +429,7 @@ void raid5_release_stripe(struct stripe_head *sh)
+ md_wakeup_thread(conf->mddev->thread);
+ return;
+ slow_path:
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
+ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+ INIT_LIST_HEAD(&list);
+@@ -438,7 +438,7 @@ void raid5_release_stripe(struct stripe_head *sh)
+ spin_unlock(&conf->device_lock);
+ release_inactive_stripe_list(conf, &list, hash);
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ static inline void remove_hash(struct stripe_head *sh)
+@@ -1937,8 +1937,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -4722,7 +4811,7 @@ index f34ad2be66a1..123469f7b560 100644
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+@@ -1994,7 +1995,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -4732,7 +4821,7 @@ index f34ad2be66a1..123469f7b560 100644
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
-@@ -6393,6 +6395,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
+@@ -6410,6 +6412,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
__func__, cpu);
return -ENOMEM;
}
@@ -4740,7 +4829,7 @@ index f34ad2be66a1..123469f7b560 100644
return 0;
}
-@@ -6403,7 +6406,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+@@ -6420,7 +6423,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
@@ -4884,19 +4973,6 @@ index bca6935a94db..d7a35ee34d03 100644
ctx->done.done);
}
break;
-diff --git a/drivers/pci/access.c b/drivers/pci/access.c
-index d11cdbb8fba3..223bbb9acb03 100644
---- a/drivers/pci/access.c
-+++ b/drivers/pci/access.c
-@@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
- WARN_ON(!dev->block_cfg_access);
-
- dev->block_cfg_access = 0;
-- wake_up_all(&pci_cfg_wait);
-+ wake_up_all_locked(&pci_cfg_wait);
- raw_spin_unlock_irqrestore(&pci_lock, flags);
- }
- EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index bedce3453dd3..faf038978650 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -5245,10 +5321,10 @@ index edc48f3b8230..ee5c6f9dfb6f 100644
static inline uint8_t *
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
-index 068c4e47fac9..a2090f640397 100644
+index bddaabb288d4..8de0ec4222fe 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
-@@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+@@ -3129,7 +3129,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* kref_put().
*/
kref_get(&qentry->irq_notify.kref);
@@ -5389,7 +5465,7 @@ index e8819aa20415..dd7f9bf45d6c 100644
#include <asm/serial.h>
/*
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
-index 080d5a59d0a7..eecc4f111473 100644
+index f6e4373a8850..4620b51b0e7c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -35,6 +35,7 @@
@@ -5400,7 +5476,7 @@ index 080d5a59d0a7..eecc4f111473 100644
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-@@ -3144,9 +3145,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+@@ -3143,9 +3144,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
serial8250_rpm_get(up);
@@ -5450,7 +5526,7 @@ index e2c33b9528d8..53af53c43e8c 100644
clk_disable(uap->clk);
}
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
-index 44e5b5bf713b..400140d1dfff 100644
+index 472ba3c813c1..e654cb421fb7 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console *co, const char *s,
@@ -5481,7 +5557,7 @@ index 44e5b5bf713b..400140d1dfff 100644
static int __init
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
-index 3b9735abf2e0..73ba3239869e 100644
+index fcc7aa248ce7..fb2c38d875f9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1764,9 +1764,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
@@ -5497,7 +5573,7 @@ index 3b9735abf2e0..73ba3239869e 100644
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 89081b834615..90b231b7ad0a 100644
+index 7b107e43b1c4..f1e8534a1748 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs)
@@ -5510,10 +5586,10 @@ index 89081b834615..90b231b7ad0a 100644
kfree(ffs);
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
-index 1468d8f085a3..6aae3ae25c18 100644
+index b8534d3f8bb0..8fcaf02e21b0 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
-@@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
+@@ -347,7 +347,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
@@ -5522,7 +5598,7 @@ index 1468d8f085a3..6aae3ae25c18 100644
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
-@@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
+@@ -356,7 +356,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len)
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
@@ -5532,7 +5608,7 @@ index 1468d8f085a3..6aae3ae25c18 100644
epdata->status = -EINTR;
} else {
diff --git a/fs/aio.c b/fs/aio.c
-index 428484f2f841..2b02e2eb2158 100644
+index 0fcb49ad67d4..211ebc21e4db 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,6 +40,7 @@
@@ -5636,7 +5712,7 @@ index d8e6d421c27f..2e689ab1306b 100644
}
spin_unlock(&p->d_lock);
diff --git a/fs/buffer.c b/fs/buffer.c
-index b205a629001d..5646afc022ba 100644
+index 5d8f496d624e..48074bd91ea3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
@@ -5720,7 +5796,7 @@ index a27fc8791551..791aecb7c1ac 100644
cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
diff --git a/fs/dcache.c b/fs/dcache.c
-index 4485a48f4091..691039a6a872 100644
+index 67957f5b325c..f0719b2f1be5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -19,6 +19,7 @@
@@ -5731,7 +5807,7 @@ index 4485a48f4091..691039a6a872 100644
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
-@@ -750,6 +751,8 @@ static inline bool fast_dput(struct dentry *dentry)
+@@ -777,6 +778,8 @@ static inline bool fast_dput(struct dentry *dentry)
*/
void dput(struct dentry *dentry)
{
@@ -5740,7 +5816,7 @@ index 4485a48f4091..691039a6a872 100644
if (unlikely(!dentry))
return;
-@@ -788,9 +791,18 @@ void dput(struct dentry *dentry)
+@@ -815,9 +818,18 @@ void dput(struct dentry *dentry)
return;
kill_it:
@@ -5762,7 +5838,7 @@ index 4485a48f4091..691039a6a872 100644
goto repeat;
}
}
-@@ -2324,7 +2336,7 @@ void d_delete(struct dentry * dentry)
+@@ -2352,7 +2364,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
@@ -5771,7 +5847,27 @@ index 4485a48f4091..691039a6a872 100644
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-@@ -2384,21 +2396,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n)
+@@ -2397,9 +2409,10 @@ EXPORT_SYMBOL(d_rehash);
+ static inline unsigned start_dir_add(struct inode *dir)
+ {
+
++ preempt_disable_rt();
+ for (;;) {
+- unsigned n = dir->i_dir_seq;
+- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
++ unsigned n = dir->__i_dir_seq;
++ if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n)
+ return n;
+ cpu_relax();
+ }
+@@ -2407,26 +2420,30 @@ static inline unsigned start_dir_add(struct inode *dir)
+
+ static inline void end_dir_add(struct inode *dir, unsigned n)
+ {
+- smp_store_release(&dir->i_dir_seq, n + 2);
++ smp_store_release(&dir->__i_dir_seq, n + 2);
++ preempt_enable_rt();
+ }
static void d_wait_lookup(struct dentry *dentry)
{
@@ -5807,7 +5903,25 @@ index 4485a48f4091..691039a6a872 100644
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2507,7 +2522,7 @@ void __d_lookup_done(struct dentry *dentry)
+@@ -2440,7 +2457,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
+
+ retry:
+ rcu_read_lock();
+- seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1;
++ seq = smp_load_acquire(&parent->d_inode->__i_dir_seq) & ~1;
+ r_seq = read_seqbegin(&rename_lock);
+ dentry = __d_lookup_rcu(parent, name, &d_seq);
+ if (unlikely(dentry)) {
+@@ -2462,7 +2479,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
+ goto retry;
+ }
+ hlist_bl_lock(b);
+- if (unlikely(parent->d_inode->i_dir_seq != seq)) {
++ if (unlikely(parent->d_inode->__i_dir_seq != seq)) {
+ hlist_bl_unlock(b);
+ rcu_read_unlock();
+ goto retry;
+@@ -2535,7 +2552,7 @@ void __d_lookup_done(struct dentry *dentry)
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -5816,7 +5930,7 @@ index 4485a48f4091..691039a6a872 100644
dentry->d_wait = NULL;
hlist_bl_unlock(b);
INIT_HLIST_NODE(&dentry->d_u.d_alias);
-@@ -3604,6 +3619,11 @@ EXPORT_SYMBOL(d_genocide);
+@@ -3632,6 +3649,11 @@ EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
@@ -5829,7 +5943,7 @@ index 4485a48f4091..691039a6a872 100644
inode_init_early();
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
-index 10db91218933..42af0a06f657 100644
+index 3cbc30413add..41a94f552aab 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
@@ -5848,10 +5962,10 @@ index 10db91218933..42af0a06f657 100644
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
diff --git a/fs/exec.c b/fs/exec.c
-index 67e86571685a..fe14cdd84016 100644
+index b8c43be24751..71f4c6ec2bb8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -1017,12 +1017,14 @@ static int exec_mmap(struct mm_struct *mm)
+@@ -1038,12 +1038,14 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
@@ -5866,8 +5980,32 @@ index 67e86571685a..fe14cdd84016 100644
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 0094923e5ebf..37fa06ef5417 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *bio)
+ * We check all buffers in the page under BH_Uptodate_Lock
+ * to avoid races with other end io clearing async_write flags
+ */
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
++ flags = bh_uptodate_lock_irqsave(head);
+ do {
+ if (bh_offset(bh) < bio_start ||
+ bh_offset(bh) + bh->b_size > bio_end) {
+@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *bio)
+ if (bio->bi_error)
+ buffer_io_error(bh);
+ } while ((bh = bh->b_this_page) != head);
+- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(head, flags);
+ if (!under_io) {
+ #ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (data_page)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
-index 642c57b8de7b..8494b9308333 100644
+index 4bbad745415a..5f91ca248ab0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct file *file,
@@ -5879,19 +6017,51 @@ index 642c57b8de7b..8494b9308333 100644
if (!o->nodeid) {
/*
-diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
-index 684996c8a3a4..6e18a06aaabe 100644
---- a/fs/jbd2/checkpoint.c
-+++ b/fs/jbd2/checkpoint.c
-@@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t *journal)
- nblocks = jbd2_space_needed(journal);
- while (jbd2_log_space_left(journal) < nblocks) {
- write_unlock(&journal->j_state_lock);
-+ if (current->plug)
-+ io_schedule();
- mutex_lock(&journal->j_checkpoint_mutex);
+diff --git a/fs/inode.c b/fs/inode.c
+index 920aa0b1c6b0..3d6b5fd1bf06 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -153,7 +153,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ inode->i_bdev = NULL;
+ inode->i_cdev = NULL;
+ inode->i_link = NULL;
+- inode->i_dir_seq = 0;
++ inode->__i_dir_seq = 0;
+ inode->i_rdev = 0;
+ inode->dirtied_when = 0;
+
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 9588780ad43e..9b37abd354c9 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -89,7 +89,7 @@ static struct dentry *next_positive(struct dentry *parent,
+ struct list_head *from,
+ int count)
+ {
+- unsigned *seq = &parent->d_inode->i_dir_seq, n;
++ unsigned *seq = &parent->d_inode->__i_dir_seq, n;
+ struct dentry *res;
+ struct list_head *p;
+ bool skipped;
+@@ -122,8 +122,9 @@ static struct dentry *next_positive(struct dentry *parent,
+ static void move_cursor(struct dentry *cursor, struct list_head *after)
+ {
+ struct dentry *parent = cursor->d_parent;
+- unsigned n, *seq = &parent->d_inode->i_dir_seq;
++ unsigned n, *seq = &parent->d_inode->__i_dir_seq;
+ spin_lock(&parent->d_lock);
++ preempt_disable_rt();
+ for (;;) {
+ n = *seq;
+ if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
+@@ -136,6 +137,7 @@ static void move_cursor(struct dentry *cursor, struct list_head *after)
+ else
+ list_add_tail(&cursor->d_child, &parent->d_subdirs);
+ smp_store_release(seq, n + 2);
++ preempt_enable_rt();
+ spin_unlock(&parent->d_lock);
+ }
- /*
diff --git a/fs/locks.c b/fs/locks.c
index 22c5b4aa4961..269c6a44449a 100644
--- a/fs/locks.c
@@ -6037,7 +6207,7 @@ index 22c5b4aa4961..269c6a44449a 100644
locks_dispose_list(&dispose);
}
diff --git a/fs/namei.c b/fs/namei.c
-index d5e5140c1045..150fbdd8e04c 100644
+index e7d125c23aa6..072a2f724437 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const struct qstr *name,
@@ -6049,7 +6219,7 @@ index d5e5140c1045..150fbdd8e04c 100644
inode_lock_shared(inode);
/* Don't go there if it's already dead */
-@@ -3083,7 +3083,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -3089,7 +3089,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -6059,7 +6229,7 @@ index d5e5140c1045..150fbdd8e04c 100644
if (unlikely(IS_DEADDIR(dir_inode)))
return -ENOENT;
diff --git a/fs/namespace.c b/fs/namespace.c
-index 5e35057f07ac..843d274ba167 100644
+index d7360f9897b4..da188c6966a3 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -14,6 +14,7 @@
@@ -6070,7 +6240,7 @@ index 5e35057f07ac..843d274ba167 100644
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/init.h> /* init_rootfs */
-@@ -356,8 +357,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -357,8 +358,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
@@ -6103,7 +6273,7 @@ index dff600ae0d74..d726d2e09353 100644
mutex_unlock(&sp->so_delegreturn_mutex);
put_nfs_open_context(ctx);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
-index 53e02b8bd9bd..a66e7d77cfbb 100644
+index 1e5321d1ed22..2510f2be8557 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -485,7 +485,7 @@ static
@@ -6115,7 +6285,7 @@ index 53e02b8bd9bd..a66e7d77cfbb 100644
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
-@@ -1487,7 +1487,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+@@ -1492,7 +1492,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
umode_t mode, int *opened)
{
@@ -6124,7 +6294,7 @@ index 53e02b8bd9bd..a66e7d77cfbb 100644
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
-@@ -1802,7 +1802,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+@@ -1807,7 +1807,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
@@ -6136,7 +6306,7 @@ index 53e02b8bd9bd..a66e7d77cfbb 100644
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
-@@ -1812,7 +1816,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+@@ -1817,7 +1821,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
@@ -6149,7 +6319,7 @@ index 53e02b8bd9bd..a66e7d77cfbb 100644
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
trace_nfs_rmdir_exit(dir, dentry, error);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
-index bf4ec5ecc97e..36cd5fc9192c 100644
+index 76ae25661d3f..89159d298278 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1957,7 +1957,11 @@ static void init_once(void *foo)
@@ -6178,10 +6348,10 @@ index 1452177c822d..f43b01d54c59 100644
};
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
-index fc9b04941739..7c9bc1c7efe7 100644
+index 4638654e26f3..5dd6fd555c72 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+@@ -2691,7 +2691,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
unsigned int seq;
int ret;
@@ -6190,7 +6360,7 @@ index fc9b04941739..7c9bc1c7efe7 100644
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+@@ -2729,7 +2729,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
@@ -6200,7 +6370,7 @@ index fc9b04941739..7c9bc1c7efe7 100644
}
out:
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
-index 0959c9661662..dabd834d7686 100644
+index 71deeae6eefd..4be6999299dc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
@@ -6212,7 +6382,7 @@ index 0959c9661662..dabd834d7686 100644
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
-@@ -1497,8 +1497,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
+@@ -1498,8 +1498,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
@@ -6226,7 +6396,7 @@ index 0959c9661662..dabd834d7686 100644
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1567,14 +1571,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
+@@ -1568,14 +1572,20 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
spin_lock(&sp->so_lock);
goto restart;
}
@@ -6402,7 +6572,7 @@ index fe251f187ff8..e89da4fb14c2 100644
/**
diff --git a/fs/proc/base.c b/fs/proc/base.c
-index ca651ac00660..41d9dc789285 100644
+index e67fec3c9856..0edc16f95596 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
@@ -6443,6 +6613,30 @@ index ab8dd1538381..5580853f57dd 100644
}
/*
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index d31cd1ebd8e9..5ea3f933a52a 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -112,8 +112,7 @@ xfs_finish_page_writeback(
+ ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
+ ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
+
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
++ flags = bh_uptodate_lock_irqsave(head);
+ do {
+ if (off >= bvec->bv_offset &&
+ off < bvec->bv_offset + bvec->bv_len) {
+@@ -136,8 +135,7 @@ xfs_finish_page_writeback(
+ }
+ off += bh->b_size;
+ } while ((bh = bh->b_this_page) != head);
+- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(head, flags);
+
+ if (!busy)
+ end_page_writeback(bvec->bv_page);
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index e861a24f06f2..b5c97d3059c7 100644
--- a/include/acpi/platform/aclinux.h
@@ -6587,7 +6781,7 @@ index 8fdcb783197d..d07dbeec7bc1 100644
#endif /* _LINUX_BH_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
-index ebbacd14d450..be5e87f6360a 100644
+index 4431ea2c8802..0744157a97ca 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -75,8 +75,50 @@ struct buffer_head {
@@ -6642,7 +6836,7 @@ index ebbacd14d450..be5e87f6360a 100644
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
-index 5b17de62c962..56027cc01a56 100644
+index 6fb1c34cf805..ccd2a5addb56 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
@@ -6653,7 +6847,7 @@ index 5b17de62c962..56027cc01a56 100644
#ifdef CONFIG_CGROUPS
-@@ -137,6 +138,7 @@ struct cgroup_subsys_state {
+@@ -138,6 +139,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -6721,7 +6915,7 @@ index e571128ad99a..5e52d28c20c1 100644
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 5beed7b30561..61cab7ef458e 100644
+index ff295e166b2c..d532c60f3fb5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -11,6 +11,7 @@
@@ -6765,6 +6959,19 @@ index a6ecb34cf547..37caab306336 100644
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index d705ae084edd..ab1946f4a729 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -688,7 +688,7 @@ struct inode {
+ struct block_device *i_bdev;
+ struct cdev *i_cdev;
+ char *i_link;
+- unsigned i_dir_seq;
++ unsigned __i_dir_seq;
+ };
+
+ __u32 i_generation;
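The double underscore is the point of this hunk: i_dir_seq is an open-coded sequence count that d_alloc_parallel() spins on, which is problematic on RT, and renaming the field breaks any direct user at compile time so all access funnels through helpers the RT tree can override. A hypothetical reader-side accessor showing the idea (name and body invented for illustration):

/* Illustrative only: the low bit of __i_dir_seq marks an in-flight
 * directory update, like the LSB of a seqcount. */
static inline unsigned int dir_seq_read_begin(const struct inode *dir)
{
	unsigned int seq;

	do {
		seq = READ_ONCE(dir->__i_dir_seq);
	} while (seq & 1);	/* writer active, retry */
	return seq;
}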
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index bb3f3297062a..a117a33ef72c 100644
--- a/include/linux/highmem.h
@@ -6913,16 +7120,16 @@ index 5e00f80b1535..a34e10b55cde 100644
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
-@@ -203,6 +210,9 @@ struct hrtimer_cpu_base {
+@@ -202,6 +209,9 @@ struct hrtimer_cpu_base {
+ unsigned int nr_retries;
unsigned int nr_hangs;
unsigned int max_hang_time;
- #endif
++#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
-+#endif
+ #endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
-
@@ -412,6 +422,13 @@ static inline void hrtimer_restart(struct hrtimer *timer)
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -7495,10 +7702,10 @@ index cb483305e1f5..4e5062316bb6 100644
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
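The wait_queue_head_t slotted into struct hrtimer_cpu_base above exists so that on RT a canceler can sleep until a running callback finishes instead of busy-spinning on hrtimer_callback_running(). A sketch of the waiter the RT tree pairs with it (reconstructed, treat the body as illustrative):

void hrtimer_wait_for_timer(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;

	/* irqsafe timers still run in hard IRQ context and need no wait */
	if (base && base->cpu_base && !timer->irqsafe)
		wait_event(base->cpu_base->wait,
			   !hrtimer_callback_running((struct hrtimer *)timer));
}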
diff --git a/include/linux/locallock.h b/include/linux/locallock.h
new file mode 100644
-index 000000000000..845c77f1a5ca
+index 000000000000..280f884a05a3
--- /dev/null
+++ b/include/linux/locallock.h
-@@ -0,0 +1,278 @@
+@@ -0,0 +1,287 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
@@ -7578,6 +7785,9 @@ index 000000000000..845c77f1a5ca
+ lv->owner = current;
+ lv->nestcnt = 1;
+ return 1;
++ } else if (lv->owner == current) {
++ lv->nestcnt++;
++ return 1;
+ }
+ return 0;
+}
@@ -7751,6 +7961,12 @@ index 000000000000..845c77f1a5ca
+
+static inline void local_irq_lock_init(int lvar) { }
+
++#define local_trylock(lvar) \
++ ({ \
++ preempt_disable(); \
++ 1; \
++ })
++
+#define local_lock(lvar) preempt_disable()
+#define local_unlock(lvar) preempt_enable()
+#define local_lock_irq(lvar) local_irq_disable()
@@ -7778,7 +7994,7 @@ index 000000000000..845c77f1a5ca
+
+#endif
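locallock.h is the RT substitute for protecting per-CPU data with preempt_disable() or local_irq_disable(): on PREEMPT_RT_BASE each local lock is a per-CPU spinlock with owner tracking and a nestcnt for recursive acquisition (visible in the trylock path above), while the stub block at the bottom maps the whole API back onto preempt_disable()/local_irq_disable() for non-RT builds. A hypothetical caller, to show the intended usage (the lock and counter names are invented; the definition macro follows the RT tree):

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(example_lock);
static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	/* per-CPU spinlock on RT, plain local_irq_disable() otherwise */
	local_lock_irq(example_lock);
	this_cpu_inc(example_events);
	local_unlock_irq(example_lock);
}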
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
-index 08d947fc4c59..705fb564a605 100644
+index e8471c2ca83a..08bde1a7a987 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,6 +11,7 @@
@@ -7789,8 +8005,8 @@ index 08d947fc4c59..705fb564a605 100644
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <asm/page.h>
-@@ -509,6 +510,9 @@ struct mm_struct {
- bool tlb_flush_pending;
+@@ -513,6 +514,9 @@ struct mm_struct {
+ bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT_BASE
@@ -7964,7 +8180,7 @@ index 000000000000..e0284edec655
+
+#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index bb9b102c15cd..a5b12b8ad196 100644
+index 47c7f5b8f675..85fc72b8a92b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handler_result_t;
@@ -7987,7 +8203,7 @@ index bb9b102c15cd..a5b12b8ad196 100644
static inline bool napi_disable_pending(struct napi_struct *n)
{
-@@ -2463,14 +2475,53 @@ void netdev_freemem(struct net_device *dev);
+@@ -2464,14 +2476,53 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -8042,7 +8258,7 @@ index bb9b102c15cd..a5b12b8ad196 100644
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -2855,6 +2906,7 @@ struct softnet_data {
+@@ -2856,6 +2907,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
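Most of the netdevice.h additions are elided by the outer diff, but the direction is visible from the sched.h hunk further down, which grows task_struct by an xmit_recursion counter under CONFIG_PREEMPT_RT_FULL: softirq processing is preemptible and migratable on RT, so the transmit-recursion guard cannot live in a per-CPU variable. A sketch of the two variants (the RT branch is an assumption about the elided body, not a quote):

#ifdef CONFIG_PREEMPT_RT_FULL
static inline int dev_recursion_level(void)
{
	/* follows the task even if the sender is preempted or migrated */
	return current->xmit_recursion;
}
#else
DECLARE_PER_CPU(int, xmit_recursion);

static inline int dev_recursion_level(void)
{
	return this_cpu_read(xmit_recursion);
}
#endif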
@@ -8106,7 +8322,7 @@ index 810124b33327..d54ca43d571f 100644
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
-index beb1e10f446e..ebaf2e7bfe29 100644
+index 3bf867a0c3b3..71c6bdd14c8a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
@@ -8335,7 +8551,7 @@ index 56939d3f6e53..b988bf40ad3e 100644
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
diff --git a/include/linux/pid.h b/include/linux/pid.h
-index 23705a53abba..2cc64b779f03 100644
+index 97b745ddece5..01a5460a0c85 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,6 +2,7 @@
@@ -8347,7 +8563,7 @@ index 23705a53abba..2cc64b779f03 100644
enum pid_type
{
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
-index 75e4e30677f1..1cfb1cb72354 100644
+index 7eeceac52dea..f97c54265904 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -50,7 +50,11 @@
@@ -8380,15 +8596,15 @@ index 75e4e30677f1..1cfb1cb72354 100644
/*
* Are we doing bottom half or hardware interrupt processing?
-@@ -72,7 +82,6 @@
+@@ -79,7 +89,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
- /*
- * Are we in NMI context?
-@@ -91,7 +100,11 @@
+ #define in_nmi() (preempt_count() & NMI_MASK)
+ #define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+@@ -96,7 +105,11 @@
/*
* The preempt_count offset after spin_lock()
*/
@@ -8400,7 +8616,7 @@ index 75e4e30677f1..1cfb1cb72354 100644
/*
* The preempt_count offset needed for things like:
-@@ -140,6 +153,20 @@ extern void preempt_count_sub(int val);
+@@ -145,6 +158,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -8421,7 +8637,7 @@ index 75e4e30677f1..1cfb1cb72354 100644
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
-@@ -148,13 +175,25 @@ do { \
+@@ -153,13 +180,25 @@ do { \
barrier(); \
} while (0)
@@ -8448,7 +8664,7 @@ index 75e4e30677f1..1cfb1cb72354 100644
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -179,6 +218,13 @@ do { \
+@@ -184,6 +223,13 @@ do { \
__preempt_schedule(); \
} while (0)
@@ -8462,7 +8678,7 @@ index 75e4e30677f1..1cfb1cb72354 100644
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
-@@ -224,6 +270,7 @@ do { \
+@@ -229,6 +275,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
@@ -8470,7 +8686,7 @@ index 75e4e30677f1..1cfb1cb72354 100644
#define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
-@@ -244,10 +291,31 @@ do { \
+@@ -249,10 +296,31 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -8708,10 +8924,11 @@ index 01f71e1d2e94..30cc001d0d5a 100644
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -626,54 +645,6 @@ static inline void rcu_preempt_sleep_check(void)
+@@ -625,54 +644,6 @@ static inline void rcu_preempt_sleep_check(void)
+ ((typeof(*p) __force __kernel *)(________p1)); \
})
- /**
+-/**
- * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
- * @v: The value to statically initialize with.
- */
@@ -8759,10 +8976,9 @@ index 01f71e1d2e94..30cc001d0d5a 100644
- _r_a_p__v; \
-})
-
--/**
+ /**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
* @p: The pointer to read
- *
@@ -951,10 +922,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
@@ -9198,7 +9414,7 @@ index 000000000000..2ffbf093ae92
+
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index f425eb3318ab..4d779486ad6b 100644
+index a4d0afc009a7..e775696b480a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -26,6 +26,7 @@ struct sched_param {
@@ -9239,13 +9455,27 @@ index f425eb3318ab..4d779486ad6b 100644
/* Task command name length */
#define TASK_COMM_LEN 16
-@@ -1013,8 +1015,18 @@ struct wake_q_head {
+@@ -1022,9 +1024,31 @@ struct wake_q_head {
+ #define WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
- extern void wake_q_add(struct wake_q_head *head,
+-extern void wake_q_add(struct wake_q_head *head,
- struct task_struct *task);
-extern void wake_up_q(struct wake_q_head *head);
-+ struct task_struct *task);
++extern void __wake_q_add(struct wake_q_head *head,
++ struct task_struct *task, bool sleeper);
++static inline void wake_q_add(struct wake_q_head *head,
++ struct task_struct *task)
++{
++ __wake_q_add(head, task, false);
++}
++
++static inline void wake_q_add_sleeper(struct wake_q_head *head,
++ struct task_struct *task)
++{
++ __wake_q_add(head, task, true);
++}
++
+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
+
+static inline void wake_up_q(struct wake_q_head *head)
@@ -9260,7 +9490,7 @@ index f425eb3318ab..4d779486ad6b 100644
/*
* sched-domains (multiprocessor balancing) declarations:
-@@ -1481,6 +1493,7 @@ struct task_struct {
+@@ -1491,6 +1515,7 @@ struct task_struct {
struct thread_info thread_info;
#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -9268,12 +9498,13 @@ index f425eb3318ab..4d779486ad6b 100644
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -1520,6 +1533,12 @@ struct task_struct {
+@@ -1530,6 +1555,13 @@ struct task_struct {
#endif
unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
++ int migrate_disable_update;
+# ifdef CONFIG_SCHED_DEBUG
+ int migrate_disable_atomic;
+# endif
@@ -9281,7 +9512,7 @@ index f425eb3318ab..4d779486ad6b 100644
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1658,6 +1677,9 @@ struct task_struct {
+@@ -1668,6 +1700,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -9291,7 +9522,7 @@ index f425eb3318ab..4d779486ad6b 100644
/* process credentials */
const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
-@@ -1689,10 +1711,15 @@ struct task_struct {
+@@ -1699,10 +1734,15 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -9307,7 +9538,13 @@ index f425eb3318ab..4d779486ad6b 100644
unsigned long sas_ss_sp;
size_t sas_ss_size;
-@@ -1723,6 +1750,8 @@ struct task_struct {
+@@ -1728,11 +1768,14 @@ struct task_struct {
+ raw_spinlock_t pi_lock;
+
+ struct wake_q_node wake_q;
++ struct wake_q_node wake_q_sleeper;
+
+ #ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct rb_root pi_waiters;
struct rb_node *pi_waiters_leftmost;
@@ -9316,7 +9553,7 @@ index f425eb3318ab..4d779486ad6b 100644
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
#endif
-@@ -1921,6 +1950,12 @@ struct task_struct {
+@@ -1931,6 +1974,12 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
@@ -9329,7 +9566,7 @@ index f425eb3318ab..4d779486ad6b 100644
#ifdef CONFIG_KCOV
/* Coverage collection mode enabled for this task (0 if disabled). */
enum kcov_mode kcov_mode;
-@@ -1946,9 +1981,23 @@ struct task_struct {
+@@ -1956,8 +2005,22 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -9346,14 +9583,13 @@ index f425eb3318ab..4d779486ad6b 100644
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
- #endif
++#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int xmit_recursion;
-+#endif
+ #endif
int pagefault_disabled;
#ifdef CONFIG_MMU
- struct task_struct *oom_reaper_list;
-@@ -1988,14 +2037,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+@@ -1998,14 +2061,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
}
#endif
@@ -9368,7 +9604,7 @@ index f425eb3318ab..4d779486ad6b 100644
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -2211,6 +2252,15 @@ extern struct pid *cad_pid;
+@@ -2225,6 +2280,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -9384,7 +9620,7 @@ index f425eb3318ab..4d779486ad6b 100644
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2218,6 +2268,7 @@ static inline void put_task_struct(struct task_struct *t)
+@@ -2232,6 +2296,7 @@ static inline void put_task_struct(struct task_struct *t)
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
@@ -9392,7 +9628,7 @@ index f425eb3318ab..4d779486ad6b 100644
struct task_struct *task_rcu_dereference(struct task_struct **ptask);
struct task_struct *try_get_task_struct(struct task_struct **ptask);
-@@ -2259,6 +2310,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
+@@ -2273,6 +2338,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
@@ -9400,7 +9636,7 @@ index f425eb3318ab..4d779486ad6b 100644
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
-@@ -2427,6 +2479,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
+@@ -2441,6 +2507,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -9411,7 +9647,7 @@ index f425eb3318ab..4d779486ad6b 100644
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2439,6 +2495,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+@@ -2453,6 +2523,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
return -EINVAL;
return 0;
}
@@ -9421,7 +9657,7 @@ index f425eb3318ab..4d779486ad6b 100644
#endif
#ifdef CONFIG_NO_HZ_COMMON
-@@ -2677,6 +2736,7 @@ extern void xtime_update(unsigned long ticks);
+@@ -2691,6 +2764,7 @@ extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -9429,7 +9665,7 @@ index f425eb3318ab..4d779486ad6b 100644
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
-@@ -2885,6 +2945,17 @@ static inline void mmdrop(struct mm_struct *mm)
+@@ -2899,6 +2973,17 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
@@ -9447,7 +9683,7 @@ index f425eb3318ab..4d779486ad6b 100644
static inline void mmdrop_async_fn(struct work_struct *work)
{
struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
-@@ -3277,6 +3348,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
+@@ -3291,6 +3376,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -9491,7 +9727,7 @@ index f425eb3318ab..4d779486ad6b 100644
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
-@@ -3308,6 +3416,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
+@@ -3322,6 +3444,51 @@ static inline int signal_pending_state(long state, struct task_struct *p)
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -9543,7 +9779,7 @@ index f425eb3318ab..4d779486ad6b 100644
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
-@@ -3333,12 +3486,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
+@@ -3347,12 +3514,16 @@ extern int __cond_resched_lock(spinlock_t *lock);
__cond_resched_lock(lock); \
})
@@ -9560,7 +9796,7 @@ index f425eb3318ab..4d779486ad6b 100644
static inline void cond_resched_rcu(void)
{
-@@ -3513,6 +3670,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+@@ -3527,6 +3698,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
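The wake_q rework at the top of this sched.h hunk gives every wakeup a normal and a "sleeper" flavour, and the task_struct change in the same hunk adds the second wake_q_node (wake_q_sleeper) that makes it possible: RT's rtmutex code can queue deferred wakeups of lock waiters without colliding with a concurrent regular wake_q user of the same task. A sketch of the kernel/sched/core.c counterpart, which is not part of this excerpt (reconstructed from the header API, details illustrative):

void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
		  bool sleeper)
{
	struct wake_q_node *node;

	if (sleeper)
		node = &task->wake_q_sleeper;
	else
		node = &task->wake_q;

	/*
	 * cmpxchg() on ->next claims the node; if it was already
	 * queued, the earlier queuer owns this wakeup.
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);	/* reference dropped after the wakeup */
	*head->lastp = node;
	head->lastp = &node->next;
}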
@@ -9787,7 +10023,7 @@ index b63f63eaa39c..295540fdfc72 100644
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
-index 32810f279f8e..0db6e31161f6 100644
+index 601dfa849d30..dca387a8fa6b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -284,6 +284,7 @@ struct sk_buff_head {
@@ -10078,18 +10314,10 @@ index 73548eb13a5d..10bac715ea96 100644
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-+#include <linux/spinlock_types_raw.h>
-+
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# include <linux/spinlock_types_nort.h>
-+# include <linux/rwlock_types.h>
- #else
+-#else
-# include <linux/spinlock_types_up.h>
-+# include <linux/rtmutex.h>
-+# include <linux/spinlock_types_rt.h>
-+# include <linux/rwlock_types_rt.h>
- #endif
-
+-#endif
+-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
@@ -10115,16 +10343,23 @@ index 73548eb13a5d..10bac715ea96 100644
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
--
++#include <linux/spinlock_types_raw.h>
+
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
--#else
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
+ #else
-# define SPIN_DEBUG_INIT(lockname)
--#endif
--
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
+ #endif
+
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
@@ -10455,7 +10690,7 @@ index 2873baf5372a..eb1a108f17ca 100644
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
diff --git a/include/linux/timer.h b/include/linux/timer.h
-index 51d601f192d4..83cea629efe1 100644
+index ec86e4e55ea3..8e5b680d1275 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer);
@@ -10468,7 +10703,7 @@ index 51d601f192d4..83cea629efe1 100644
#else
# define del_timer_sync(t) del_timer(t)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
-index be007610ceb0..15154b13a53b 100644
+index ba57266d9e80..5c36934ec2bc 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -56,6 +56,9 @@ struct trace_entry {
@@ -10550,10 +10785,10 @@ index 2408e8d5c05c..db50d6609195 100644
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
diff --git a/include/net/dst.h b/include/net/dst.h
-index 6835d224d47b..55a5a9698f14 100644
+index ddcff17615da..a1fc787b1a8c 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
-@@ -446,7 +446,7 @@ static inline void dst_confirm(struct dst_entry *dst)
+@@ -452,7 +452,7 @@ static inline void dst_confirm(struct dst_entry *dst)
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
{
@@ -10661,7 +10896,7 @@ index 7adf4386ac8f..d3fd5c357268 100644
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
-index e6aa0a249672..b57736f2a8a3 100644
+index f18fc1a0321f..5d2c9b89c168 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
@@ -11003,17 +11238,17 @@ diff --git a/init/Makefile b/init/Makefile
index c4fb45525d08..821190dfaa75 100644
--- a/init/Makefile
+++ b/init/Makefile
-@@ -35,4 +35,4 @@ $(obj)/version.o: include/generated/compile.h
+@@ -35,4 +35,4 @@ silent_chk_compile.h = :
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
diff --git a/init/main.c b/init/main.c
-index ae3996ae9bac..6470deef01c9 100644
+index 99f026565608..48ffaaad8ac9 100644
--- a/init/main.c
+++ b/init/main.c
-@@ -507,6 +507,7 @@ asmlinkage __visible void __init start_kernel(void)
+@@ -508,6 +508,7 @@ asmlinkage __visible void __init start_kernel(void)
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -11138,7 +11373,7 @@ index 3f9c97419f02..11dbe26a8279 100644
config PREEMPT_COUNT
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index a3d2aad2443f..bb6b252648ff 100644
+index 4c233437ee1a..6c3c9f298f22 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head)
@@ -11165,7 +11400,7 @@ index a3d2aad2443f..bb6b252648ff 100644
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5740,6 +5740,7 @@ static int __init cgroup_wq_init(void)
+@@ -5749,6 +5749,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
@@ -11174,7 +11409,7 @@ index a3d2aad2443f..bb6b252648ff 100644
/*
* Used to destroy pidlists and separate to serve as flush domain.
diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 99c6c568bc55..f1c64e563970 100644
+index 802eb3361a0a..c6a4cf8ba645 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -239,6 +239,289 @@ static struct {
@@ -11467,7 +11702,7 @@ index 99c6c568bc55..f1c64e563970 100644
void get_online_cpus(void)
{
-@@ -789,10 +1072,14 @@ static int takedown_cpu(unsigned int cpu)
+@@ -802,10 +1085,14 @@ static int takedown_cpu(unsigned int cpu)
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
@@ -11482,7 +11717,7 @@ index 99c6c568bc55..f1c64e563970 100644
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
-@@ -877,6 +1164,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+@@ -890,6 +1177,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
bool hasdied = false;
@@ -11492,7 +11727,7 @@ index 99c6c568bc55..f1c64e563970 100644
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -884,7 +1174,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+@@ -897,7 +1187,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
if (!cpu_present(cpu))
return -EINVAL;
@@ -11527,7 +11762,7 @@ index 99c6c568bc55..f1c64e563970 100644
cpuhp_tasks_frozen = tasks_frozen;
-@@ -923,10 +1240,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+@@ -936,10 +1253,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
@@ -11543,7 +11778,7 @@ index 99c6c568bc55..f1c64e563970 100644
return ret;
}
-@@ -1240,6 +1562,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
+@@ -1242,6 +1564,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
@@ -11552,7 +11787,7 @@ index 99c6c568bc55..f1c64e563970 100644
#endif /* CONFIG_SMP */
/* Boot processor state steps */
-@@ -1924,6 +2248,10 @@ void __init boot_cpu_init(void)
+@@ -1926,6 +2250,10 @@ void __init boot_cpu_init(void)
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
@@ -11563,11 +11798,134 @@ index 99c6c568bc55..f1c64e563970 100644
}
/*
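The bulk of the kernel/cpu.c additions (elided above) implement RT's hotplug pinning: instead of relying on preempt_disable() to keep a task on a CPU that must not go away, the task pins itself, and cpu_down() synchronizes with every pinned task before tearing the CPU down. A hedged usage sketch (helper names as in the RT tree, surrounding code invented):

#include <linux/cpu.h>

static void example_cpu_bound_section(void)
{
	/*
	 * Blocks a concurrent cpu_down() of this CPU until the
	 * matching unpin; unlike preempt_disable(), the section
	 * may sleep on RT.
	 */
	pin_current_cpu();
	/* ... work on state tied to the current CPU ... */
	unpin_current_cpu();
}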
+diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
+index 009cc9a17d95..67b02e138a47 100644
+--- a/kernel/cpu_pm.c
++++ b/kernel/cpu_pm.c
+@@ -22,15 +22,21 @@
+ #include <linux/spinlock.h>
+ #include <linux/syscore_ops.h>
+
+-static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+-static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
++static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+ {
+ int ret;
+
+- ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
++ /*
++ * __atomic_notifier_call_chain has an RCU read critical section, which
++ * could be dysfunctional in CPU idle. Copy RCU_NONIDLE code to let
++ * RCU know this.
++ */
++ rcu_irq_enter_irqson();
++ ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ nr_to_call, nr_calls);
++ rcu_irq_exit_irqson();
+
+ return notifier_to_errno(ret);
+ }
+@@ -47,14 +53,7 @@ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+ */
+ int cpu_pm_register_notifier(struct notifier_block *nb)
+ {
+- unsigned long flags;
+- int ret;
+-
+- write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+- ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+- write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+-
+- return ret;
++ return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+@@ -69,14 +68,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+ */
+ int cpu_pm_unregister_notifier(struct notifier_block *nb)
+ {
+- unsigned long flags;
+- int ret;
+-
+- write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+- ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+- write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+-
+- return ret;
++ return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+@@ -100,7 +92,6 @@ int cpu_pm_enter(void)
+ int nr_calls;
+ int ret = 0;
+
+- read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+@@ -108,7 +99,6 @@ int cpu_pm_enter(void)
+ * PM entry who are notified earlier to prepare for it.
+ */
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+ }
+@@ -128,13 +118,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
+ */
+ int cpu_pm_exit(void)
+ {
+- int ret;
+-
+- read_lock(&cpu_pm_notifier_lock);
+- ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+-
+- return ret;
++ return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ }
+ EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+@@ -159,7 +143,6 @@ int cpu_cluster_pm_enter(void)
+ int nr_calls;
+ int ret = 0;
+
+- read_lock(&cpu_pm_notifier_lock);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ if (ret)
+ /*
+@@ -167,7 +150,6 @@ int cpu_cluster_pm_enter(void)
+ * PM entry who are notified earlier to prepare for it.
+ */
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+
+ return ret;
+ }
+@@ -190,13 +172,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+ */
+ int cpu_cluster_pm_exit(void)
+ {
+- int ret;
+-
+- read_lock(&cpu_pm_notifier_lock);
+- ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+- read_unlock(&cpu_pm_notifier_lock);
+-
+- return ret;
++ return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ }
+ EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
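The rcu_irq_enter_irqson()/rcu_irq_exit_irqson() pair in cpu_pm_notify() above is the open-coded body of the stock RCU_NONIDLE() helper; the equivalent form is shown here only for comparison (sketch):

int ret;

RCU_NONIDLE(ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain,
					       event, NULL,
					       nr_to_call, nr_calls));

Switching the chain from an rwlock-protected raw notifier to an atomic (RCU-walked) notifier is what removes cpu_pm_notifier_lock, which on RT is a sleeping lock, from the idle entry and exit paths.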
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
-index 29f815d2ef7e..341b17f24f95 100644
+index 511b1dd8ff09..1dd63833ecdc 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
-@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
+@@ -285,7 +285,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
@@ -11576,7 +11934,7 @@ index 29f815d2ef7e..341b17f24f95 100644
static struct workqueue_struct *cpuset_migrate_mm_wq;
-@@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
+@@ -908,9 +908,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
continue;
rcu_read_unlock();
@@ -11588,7 +11946,7 @@ index 29f815d2ef7e..341b17f24f95 100644
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+@@ -975,9 +975,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
return retval;
@@ -11600,7 +11958,7 @@ index 29f815d2ef7e..341b17f24f95 100644
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
-@@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
+@@ -1177,9 +1177,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
continue;
rcu_read_unlock();
@@ -11612,7 +11970,7 @@ index 29f815d2ef7e..341b17f24f95 100644
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+@@ -1247,9 +1247,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
if (retval < 0)
goto done;
@@ -11624,7 +11982,7 @@ index 29f815d2ef7e..341b17f24f95 100644
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+@@ -1340,9 +1340,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
@@ -11636,7 +11994,7 @@ index 29f815d2ef7e..341b17f24f95 100644
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
-@@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+@@ -1757,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
@@ -11645,7 +12003,7 @@ index 29f815d2ef7e..341b17f24f95 100644
switch (type) {
case FILE_CPULIST:
-@@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+@@ -1776,7 +1776,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
ret = -EINVAL;
}
@@ -11654,7 +12012,7 @@ index 29f815d2ef7e..341b17f24f95 100644
return ret;
}
-@@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+@@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpuset_inc();
@@ -11669,7 +12027,7 @@ index 29f815d2ef7e..341b17f24f95 100644
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
-@@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+@@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
}
rcu_read_unlock();
@@ -11684,7 +12042,7 @@ index 29f815d2ef7e..341b17f24f95 100644
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
-@@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
+@@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
@@ -11693,7 +12051,7 @@ index 29f815d2ef7e..341b17f24f95 100644
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+@@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
@@ -11702,7 +12060,7 @@ index 29f815d2ef7e..341b17f24f95 100644
mutex_unlock(&cpuset_mutex);
}
-@@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+@@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
{
bool is_empty;
@@ -11717,7 +12075,7 @@ index 29f815d2ef7e..341b17f24f95 100644
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
+@@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
@@ -11730,7 +12088,7 @@ index 29f815d2ef7e..341b17f24f95 100644
if (cpus_updated)
update_tasks_cpumask(cs);
-@@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+@@ -2317,21 +2317,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
@@ -11756,7 +12114,7 @@ index 29f815d2ef7e..341b17f24f95 100644
update_tasks_nodemask(&top_cpuset);
}
-@@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+@@ -2436,11 +2436,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
unsigned long flags;
@@ -11770,7 +12128,7 @@ index 29f815d2ef7e..341b17f24f95 100644
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
-@@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
+@@ -2488,11 +2488,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
nodemask_t mask;
unsigned long flags;
@@ -11784,7 +12142,7 @@ index 29f815d2ef7e..341b17f24f95 100644
return mask;
}
-@@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
+@@ -2584,14 +2584,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
@@ -11802,7 +12160,7 @@ index 29f815d2ef7e..341b17f24f95 100644
}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
-index fc1ef736253c..83c666537a7a 100644
+index 77777d918676..3203e9dee9f8 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
@@ -11843,7 +12201,7 @@ index fc1ef736253c..83c666537a7a 100644
return r;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 07c0dc806dfc..baf1a2867d74 100644
+index 13b9784427b0..f74fbfe5465c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
@@ -11854,7 +12212,7 @@ index 07c0dc806dfc..baf1a2867d74 100644
}
static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
-@@ -8363,6 +8364,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
+@@ -8405,6 +8406,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
@@ -11876,10 +12234,10 @@ index 3076f3089919..fb2ebcf3ca7c 100644
spin_unlock(&sighand->siglock);
diff --git a/kernel/fork.c b/kernel/fork.c
-index 59faac4de181..0edb0f3c1db8 100644
+index 70e10cb49be0..2529725eefa2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -76,6 +76,7 @@
+@@ -77,6 +77,7 @@
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
@@ -11887,7 +12245,7 @@ index 59faac4de181..0edb0f3c1db8 100644
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
-@@ -376,13 +377,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
+@@ -378,13 +379,24 @@ static inline void put_signal_struct(struct signal_struct *sig)
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
@@ -11913,7 +12271,7 @@ index 59faac4de181..0edb0f3c1db8 100644
cgroup_free(tsk);
task_numa_free(tsk);
security_task_free(tsk);
-@@ -393,7 +405,18 @@ void __put_task_struct(struct task_struct *tsk)
+@@ -395,7 +407,18 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
@@ -11932,7 +12290,15 @@ index 59faac4de181..0edb0f3c1db8 100644
void __init __weak arch_task_cache_init(void) { }
-@@ -852,6 +875,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -541,6 +564,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ tsk->splice_pipe = NULL;
+ tsk->task_frag.page = NULL;
+ tsk->wake_q.next = NULL;
++ tsk->wake_q_sleeper.next = NULL;
+
+ account_kernel_stack(tsk, 1);
+
+@@ -867,6 +891,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -11952,7 +12318,7 @@ index 59faac4de181..0edb0f3c1db8 100644
static inline void __mmput(struct mm_struct *mm)
{
VM_BUG_ON(atomic_read(&mm->mm_users));
-@@ -1417,6 +1453,7 @@ static void rt_mutex_init_task(struct task_struct *p)
+@@ -1432,6 +1469,7 @@ static void rt_mutex_init_task(struct task_struct *p)
#ifdef CONFIG_RT_MUTEXES
p->pi_waiters = RB_ROOT;
p->pi_waiters_leftmost = NULL;
@@ -11960,7 +12326,7 @@ index 59faac4de181..0edb0f3c1db8 100644
p->pi_blocked_on = NULL;
#endif
}
-@@ -1426,6 +1463,9 @@ static void rt_mutex_init_task(struct task_struct *p)
+@@ -1441,6 +1479,9 @@ static void rt_mutex_init_task(struct task_struct *p)
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -11970,7 +12336,7 @@ index 59faac4de181..0edb0f3c1db8 100644
tsk->cputime_expires.prof_exp = 0;
tsk->cputime_expires.virt_exp = 0;
tsk->cputime_expires.sched_exp = 0;
-@@ -1552,6 +1592,7 @@ static __latent_entropy struct task_struct *copy_process(
+@@ -1567,6 +1608,7 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
@@ -11979,10 +12345,10 @@ index 59faac4de181..0edb0f3c1db8 100644
p->utime = p->stime = p->gtime = 0;
p->utimescaled = p->stimescaled = 0;
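Both fork.c additions above defer teardown work that RT cannot do in the calling context: the final put of a task_struct is routed through RCU, and __mmdrop() gains a delayed twin for callers that hold locks RT turns into sleeping locks. A sketch of the delayed mm-drop halves (shapes as in the RT tree, details illustrative):

#ifdef CONFIG_PREEMPT_RT_BASE
/* RCU callback that performs the actual teardown */
void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct,
					    delayed_drop);
	__mmdrop(mm);
}

/* sched.h counterpart: defer instead of freeing in place */
static inline void mmdrop_delayed(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#endif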
diff --git a/kernel/futex.c b/kernel/futex.c
-index 4c6b6e697b73..d9bab63efccb 100644
+index 88bad86180ac..2e074d63e8fa 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -800,7 +800,7 @@ static int refill_pi_state_cache(void)
+@@ -801,7 +801,7 @@ static int refill_pi_state_cache(void)
return 0;
}
@@ -11991,7 +12357,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
{
struct futex_pi_state *pi_state = current->pi_state_cache;
-@@ -810,6 +810,11 @@ static struct futex_pi_state * alloc_pi_state(void)
+@@ -811,6 +811,11 @@ static struct futex_pi_state * alloc_pi_state(void)
return pi_state;
}
@@ -12003,7 +12369,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/*
* Drops a reference to the pi_state object and frees or caches it
* when the last reference is gone.
-@@ -854,7 +859,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
+@@ -855,7 +860,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
* Look up the task based on what TID userspace gave us.
* We dont trust it.
*/
@@ -12012,7 +12378,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
{
struct task_struct *p;
-@@ -904,7 +909,9 @@ void exit_pi_state_list(struct task_struct *curr)
+@@ -905,7 +910,9 @@ void exit_pi_state_list(struct task_struct *curr)
* task still owns the PI-state:
*/
if (head->next != next) {
@@ -12022,7 +12388,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
continue;
}
-@@ -914,10 +921,12 @@ void exit_pi_state_list(struct task_struct *curr)
+@@ -915,10 +922,12 @@ void exit_pi_state_list(struct task_struct *curr)
pi_state->owner = NULL;
raw_spin_unlock_irq(&curr->pi_lock);
@@ -12037,7 +12403,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
-@@ -971,6 +980,39 @@ void exit_pi_state_list(struct task_struct *curr)
+@@ -972,6 +981,39 @@ void exit_pi_state_list(struct task_struct *curr)
*
* [10] There is no transient state which leaves owner and user space
* TID out of sync.
@@ -12077,7 +12443,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
*/
/*
-@@ -978,10 +1020,13 @@ void exit_pi_state_list(struct task_struct *curr)
+@@ -979,10 +1021,13 @@ void exit_pi_state_list(struct task_struct *curr)
* the pi_state against the user space value. If correct, attach to
* it.
*/
@@ -12092,7 +12458,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/*
* Userspace might have messed up non-PI and PI futexes [3]
-@@ -989,9 +1034,39 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+@@ -990,8 +1035,38 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
if (unlikely(!pi_state))
return -EINVAL;
@@ -12110,7 +12476,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
+ */
WARN_ON(!atomic_read(&pi_state->refcount));
- /*
++ /*
+ * Now that we have a pi_state, we can acquire wait_lock
+ * and do the state validation.
+ */
@@ -12128,11 +12494,10 @@ index 4c6b6e697b73..d9bab63efccb 100644
+ if (uval != uval2)
+ goto out_eagain;
+
-+ /*
+ /*
* Handle the owner died case:
*/
- if (uval & FUTEX_OWNER_DIED) {
-@@ -1006,11 +1081,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+@@ -1007,11 +1082,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
* is not 0. Inconsistent state. [5]
*/
if (pid)
@@ -12146,7 +12511,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
/*
-@@ -1022,14 +1097,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+@@ -1023,14 +1098,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
* Take a ref on the state and return success. [6]
*/
if (!pid)
@@ -12163,7 +12528,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
/*
-@@ -1038,11 +1113,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+@@ -1039,11 +1114,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
* user space TID. [9/10]
*/
if (pid != task_pid_vnr(pi_state->owner))
@@ -12196,7 +12561,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
/*
-@@ -1093,6 +1186,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+@@ -1094,6 +1187,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
/*
* No existing pi state. First waiter. [2]
@@ -12206,7 +12571,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
*/
pi_state = alloc_pi_state();
-@@ -1117,17 +1213,18 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
+@@ -1118,17 +1214,18 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
return 0;
}
@@ -12229,7 +12594,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/*
* We are the first waiter - try to look up the owner based on
-@@ -1146,7 +1243,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+@@ -1147,7 +1244,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
@@ -12238,7 +12603,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
return curval != uval ? -EAGAIN : 0;
}
-@@ -1174,7 +1271,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+@@ -1175,7 +1272,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
struct task_struct *task, int set_waiters)
{
u32 uval, newval, vpid = task_pid_vnr(task);
@@ -12247,7 +12612,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
int ret;
/*
-@@ -1200,9 +1297,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+@@ -1201,9 +1298,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
* Lookup existing state first. If it exists, try to attach to
* its pi_state.
*/
@@ -12260,7 +12625,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/*
* No waiter and user TID is 0. We are here because the
-@@ -1283,50 +1380,45 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
+@@ -1284,50 +1381,45 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
wake_q_add(wake_q, p);
__unqueue_futex(q);
/*
@@ -12339,7 +12704,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
-@@ -1335,6 +1427,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+@@ -1336,6 +1428,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
@@ -12347,7 +12712,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
} else if (curval != uval) {
/*
* If a unconditional UNLOCK_PI operation (user space did not
-@@ -1347,10 +1440,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+@@ -1348,10 +1441,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
else
ret = -EINVAL;
}
@@ -12366,7 +12731,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
raw_spin_lock(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
-@@ -1363,22 +1460,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+@@ -1364,22 +1461,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -12395,7 +12760,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
/*
-@@ -1824,7 +1914,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+@@ -1825,7 +1915,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
* If that call succeeds then we have pi_state and an
* initial refcount on it.
*/
@@ -12404,7 +12769,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
switch (ret) {
-@@ -1907,7 +1997,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+@@ -1908,7 +1998,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
* refcount on the pi_state and store the pointer in
* the futex_q object of the waiter.
*/
@@ -12413,7 +12778,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
this->pi_state = pi_state;
ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
this->rt_waiter,
-@@ -1924,6 +2014,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+@@ -1925,6 +2015,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -12430,7 +12795,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2007,20 +2107,7 @@ queue_unlock(struct futex_hash_bucket *hb)
+@@ -2008,20 +2108,7 @@ queue_unlock(struct futex_hash_bucket *hb)
hb_waiters_dec(hb);
}
@@ -12452,7 +12817,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
{
int prio;
-@@ -2037,6 +2124,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+@@ -2038,6 +2125,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = current;
@@ -12477,7 +12842,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
spin_unlock(&hb->lock);
}
-@@ -2123,10 +2228,13 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+@@ -2124,10 +2229,13 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
@@ -12492,7 +12857,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/* Owner died? */
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
-@@ -2134,7 +2242,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+@@ -2135,7 +2243,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
/*
* We are here either because we stole the rtmutex from the
* previous highest priority waiter or we are the highest priority
@@ -12502,7 +12867,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
* We have to replace the newowner TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
-@@ -2142,17 +2251,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+@@ -2143,17 +2252,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
* because we can fault here. Imagine swapped out pages or a fork
* that marked all the anonymous memory readonly for cow.
*
@@ -12525,7 +12890,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-@@ -2167,47 +2275,60 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+@@ -2168,47 +2276,60 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
* itself.
*/
if (pi_state->owner != NULL) {
@@ -12601,7 +12966,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
static long futex_wait_restart(struct restart_block *restart);
-@@ -2229,13 +2350,16 @@ static long futex_wait_restart(struct restart_block *restart);
+@@ -2230,57 +2351,32 @@ static long futex_wait_restart(struct restart_block *restart);
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
@@ -12619,10 +12984,10 @@ index 4c6b6e697b73..d9bab63efccb 100644
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
-@@ -2243,43 +2367,15 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
+ goto out;
}
- /*
+- /*
- * Catch the rare case, where the lock was released when we were on the
- * way back before we locked the hash bucket.
- */
@@ -12651,7 +13016,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
- goto out;
- }
-
-- /*
+ /*
* Paranoia check. If we did not take the lock, then we should not be
* the owner of the rt_mutex.
*/
@@ -12665,7 +13030,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
out:
return ret ? ret : locked;
-@@ -2503,6 +2599,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2504,6 +2600,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
@@ -12674,7 +13039,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2555,25 +2653,77 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2556,24 +2654,76 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
}
}
@@ -12699,8 +13064,8 @@ index 4c6b6e697b73..d9bab63efccb 100644
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
+ goto no_block;
- }
-
++ }
++
+ rt_mutex_init_waiter(&rt_waiter, false);
+
+ /*
@@ -12735,8 +13100,8 @@ index 4c6b6e697b73..d9bab63efccb 100644
+
+ spin_lock(q.lock_ptr);
+ goto no_block;
-+ }
-+
+ }
+
+
+ if (unlikely(to))
+ hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
@@ -12744,7 +13109,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
+ ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
spin_lock(q.lock_ptr);
- /*
++ /*
+ * If we failed to acquire the lock (signal/timeout), we must
+ * first acquire the hb->lock before removing the lock from the
+ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
@@ -12757,11 +13122,10 @@ index 4c6b6e697b73..d9bab63efccb 100644
+ ret = 0;
+
+no_block:
-+ /*
+ /*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
- */
-@@ -2589,12 +2739,19 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2590,12 +2740,19 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
* If fixup_owner() faulted and was unable to handle the fault, unlock
* it and return the fault to userspace.
*/
@@ -12783,7 +13147,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
goto out_put_key;
out_unlock_put_key:
-@@ -2603,8 +2760,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2604,8 +2761,10 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
out_put_key:
put_futex_key(&q.key);
out:
@@ -12795,7 +13159,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
-@@ -2631,7 +2790,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+@@ -2632,7 +2791,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
union futex_key key = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb;
@@ -12804,7 +13168,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
int ret;
retry:
-@@ -2655,12 +2814,48 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+@@ -2656,12 +2815,48 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* all and we at least want to know if user space fiddled
* with the futex value instead of blindly unlocking.
*/
@@ -12819,9 +13183,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
+ if (!pi_state)
+ goto out_unlock;
+
- /*
-- * In case of success wake_futex_pi dropped the hash
-- * bucket lock.
++ /*
+ * If current does not own the pi_state then the futex is
+ * inconsistent and user space fiddled with the futex value.
+ */
@@ -12853,12 +13215,14 @@ index 4c6b6e697b73..d9bab63efccb 100644
+
+ put_pi_state(pi_state);
+
-+ /*
+ /*
+- * In case of success wake_futex_pi dropped the hash
+- * bucket lock.
+ * Success, we're done! No tricky corner cases.
*/
if (!ret)
goto out_putkey;
-@@ -2675,7 +2870,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+@@ -2676,7 +2871,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* setting the FUTEX_WAITERS bit. Try again.
*/
if (ret == -EAGAIN) {
@@ -12866,7 +13230,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
put_futex_key(&key);
goto retry;
}
-@@ -2683,7 +2877,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+@@ -2684,7 +2878,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
@@ -12875,7 +13239,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
/*
-@@ -2693,8 +2887,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+@@ -2694,8 +2888,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* preserve the WAITERS bit not the OWNER_DIED one. We are the
* owner.
*/
@@ -12887,7 +13251,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/*
* If uval has changed, let user space handle it.
-@@ -2708,7 +2904,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
+@@ -2709,7 +2905,6 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
return ret;
pi_faulted:
@@ -12895,7 +13259,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
-@@ -2812,8 +3007,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2813,8 +3008,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
@@ -12906,7 +13270,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2838,10 +3034,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2839,10 +3035,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -12918,7 +13282,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
-@@ -2872,20 +3065,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2873,20 +3066,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -12985,7 +13349,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2894,16 +3122,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2895,16 +3123,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -13009,7 +13373,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
}
} else {
struct rt_mutex *pi_mutex;
-@@ -2915,10 +3146,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2916,10 +3147,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
@@ -13027,7 +13391,7 @@ index 4c6b6e697b73..d9bab63efccb 100644
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
-@@ -2936,13 +3171,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+@@ -2937,13 +3172,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* the fault, unlock the rt_mutex and return the fault to
* userspace.
*/
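
The futex hunks above track the rtmutex rework that appears further down in this patch: instead of a single rt_mutex_finish_proxy_lock() call, the requeue-PI wait path is split into a wait step and an explicit cleanup step so that a late lock steal is detected under lock->wait_lock. A hedged sketch of how a caller composes the two (the wrapper name is illustrative; the real call site is futex_wait_requeue_pi()):

    static int requeue_pi_wait_sketch(struct rt_mutex *pi_mutex,
                                      struct hrtimer_sleeper *to,
                                      struct rt_mutex_waiter *waiter)
    {
            int ret;

            /* Block until woken, timed out, or signalled. */
            ret = rt_mutex_wait_proxy_lock(pi_mutex, to, waiter);

            /*
             * On failure the waiter may still be enqueued; only report
             * the error if cleanup confirms we did not become the owner
             * in the meantime.
             */
            if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, waiter))
                    ret = 0;

            return ret;
    }
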
@@ -13073,7 +13437,7 @@ index d3f24905852c..f87aa8fdcc51 100644
if (!noirqdebug)
note_interrupt(desc, retval);
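
The kernel/irq hunks here and below extend forced interrupt threading, which is what lets handlers take sleeping locks on RT. From a driver's point of view the interface stays the ordinary threaded-IRQ API; a self-contained sketch (all demo_* names are made up):

    #include <linux/interrupt.h>

    /* Primary handler: hard-irq context, just quiesce the device. */
    static irqreturn_t demo_hardirq(int irq, void *dev_id)
    {
            return IRQ_WAKE_THREAD;         /* defer the real work */
    }

    /* Threaded handler: process context, may sleep (RT-friendly). */
    static irqreturn_t demo_thread_fn(int irq, void *dev_id)
    {
            /* ... handle the event ... */
            return IRQ_HANDLED;
    }

    static int demo_setup(unsigned int demo_irq, void *demo_dev)
    {
            return request_threaded_irq(demo_irq, demo_hardirq,
                                        demo_thread_fn, IRQF_ONESHOT,
                                        "demo", demo_dev);
    }
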
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
-index 6b669593e7eb..e357bf6c59d5 100644
+index ea41820ab12e..5994867526f3 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,6 +22,7 @@
@@ -13196,7 +13560,7 @@ index 6b669593e7eb..e357bf6c59d5 100644
wake_threads_waitq(desc);
}
-@@ -1336,6 +1389,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+@@ -1338,6 +1391,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
@@ -13206,7 +13570,7 @@ index 6b669593e7eb..e357bf6c59d5 100644
/* Set default affinity mask once everything is setup */
setup_affinity(desc, mask);
-@@ -2061,7 +2117,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
+@@ -2063,7 +2119,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
@@ -13423,16 +13787,16 @@ index ee1bc1bb8feb..ddef07958840 100644
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -225,6 +234,9 @@ static struct attribute * kernel_attrs[] = {
+@@ -224,6 +233,9 @@ static struct attribute * kernel_attrs[] = {
+ #ifndef CONFIG_TINY_RCU
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
- #endif
++#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+ &realtime_attr.attr,
-+#endif
+ #endif
NULL
};
-
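
The ksysfs.c hunk above adds a realtime attribute to kernel_attrs[] under CONFIG_PREEMPT_RT_FULL. The definition of realtime_attr is outside this excerpt; following the fscaps_show pattern visible above, it plausibly looks like this (a sketch, not the patch's verbatim code):

    #ifdef CONFIG_PREEMPT_RT_FULL
    static ssize_t realtime_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 1);
    }
    KERNEL_ATTR_RO(realtime);
    #endif

With that in place, userspace can detect an RT kernel by reading /sys/kernel/realtime.
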
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 6f88e352cd4f..6ff9e8011dd0 100644
--- a/kernel/locking/Makefile
@@ -13470,7 +13834,7 @@ index 6f88e352cd4f..6ff9e8011dd0 100644
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
-index 4d7ffc0a0d00..3d157b3128eb 100644
+index 6599c7f3071d..79f8e00e802e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
@@ -13540,7 +13904,7 @@ index 4d7ffc0a0d00..3d157b3128eb 100644
return NULL;
}
-@@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+@@ -3417,7 +3430,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
* Clearly if the lock hasn't been acquired _ever_, we're not
* holding it either, so report failure.
*/
@@ -13549,7 +13913,7 @@ index 4d7ffc0a0d00..3d157b3128eb 100644
return 0;
/*
-@@ -3689,6 +3702,7 @@ static void check_flags(unsigned long flags)
+@@ -3696,6 +3709,7 @@ static void check_flags(unsigned long flags)
}
}
@@ -13557,7 +13921,7 @@ index 4d7ffc0a0d00..3d157b3128eb 100644
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3703,6 +3717,7 @@ static void check_flags(unsigned long flags)
+@@ -3710,6 +3724,7 @@ static void check_flags(unsigned long flags)
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
@@ -13565,7 +13929,7 @@ index 4d7ffc0a0d00..3d157b3128eb 100644
if (!debug_locks)
print_irqtrace_events(current);
-@@ -4159,7 +4174,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
+@@ -4166,7 +4181,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
@@ -13575,7 +13939,7 @@ index 4d7ffc0a0d00..3d157b3128eb 100644
}
/*
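
The lockdep.c hunks above tighten the lock-class lookup and compile the softirq-state consistency checks out on RT; the rtmutex rework below also leans on lockdep_assert_held() to document its locking contract. The general pattern, as a self-contained sketch (demo_* names are made up):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static int demo_counter;

    /* Callers must hold demo_lock; CONFIG_LOCKDEP kernels verify it. */
    static void demo_counter_inc(void)
    {
            lockdep_assert_held(&demo_lock);
            demo_counter++;
    }
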
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
-index f8c5af52a131..788068773e61 100644
+index d3de04b12f8c..0f49abeae337 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -26,7 +26,6 @@
@@ -13986,7 +14350,7 @@ index d0519c3432b6..b585af9a1b50 100644
extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
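
The rtmutex.c interdiff below drops the steal-mode argument from rt_mutex_waiter_less() and instead introduces rt_mutex_waiter_equal() plus a dedicated rt_mutex_steal() helper. Only the tail of rt_mutex_waiter_equal() is visible in the hunks; mirroring the dl_prio() handling of rt_mutex_waiter_less(), its plausible shape is (a sketch):

    static inline int
    rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
                          struct rt_mutex_waiter *right)
    {
            if (left->prio != right->prio)
                    return 0;

            /*
             * For deadline tasks the absolute deadline, not the static
             * priority, decides the order, so "equal" must compare
             * deadlines as well.
             */
            if (dl_prio(left->prio))
                    return left->deadline == right->deadline;

            return 1;
    }
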
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 2c49d76f96c3..218f1d26afe7 100644
+index 2c49d76f96c3..3a8b5d44aaf8 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -7,6 +7,11 @@
@@ -14001,15 +14365,16 @@ index 2c49d76f96c3..218f1d26afe7 100644
*
* See Documentation/locking/rt-mutex-design.txt for details.
*/
-@@ -16,6 +21,7 @@
+@@ -16,6 +21,8 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
+#include <linux/ww_mutex.h>
++#include <linux/blkdev.h>
#include "rtmutex_common.h"
-@@ -133,6 +139,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+@@ -133,6 +140,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
@@ -14022,36 +14387,20 @@ index 2c49d76f96c3..218f1d26afe7 100644
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -222,12 +234,25 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+@@ -222,6 +235,12 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
}
#endif
-+#define STEAL_NORMAL 0
-+#define STEAL_LATERAL 1
+/*
+ * Only use with rt_mutex_waiter_{less,equal}()
+ */
-+#define task_to_waiter(p) \
-+ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++#define task_to_waiter(p) &(struct rt_mutex_waiter) \
++ { .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
+
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
-- struct rt_mutex_waiter *right)
-+ struct rt_mutex_waiter *right, int mode)
- {
-- if (left->prio < right->prio)
-- return 1;
-+ if (mode == STEAL_NORMAL) {
-+ if (left->prio < right->prio)
-+ return 1;
-+ } else {
-+ if (left->prio <= right->prio)
-+ return 1;
-+ }
-
- /*
- * If both waiters have dl_prio(), we check the deadlines of the
-@@ -236,12 +261,30 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ struct rt_mutex_waiter *right)
+@@ -236,12 +255,51 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
* then right waiter has a dl_prio() too.
*/
if (dl_prio(left->prio))
@@ -14081,28 +14430,31 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ return 1;
+}
+
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++static inline int
++rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
++{
++ struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
++
++ if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
++ return 1;
++
++ /*
++ * Note that RT tasks are excluded from lateral-steals
++ * to prevent the introduction of an unbounded latency.
++ */
++ if (mode == STEAL_NORMAL || rt_task(waiter->task))
++ return 0;
++
++ return rt_mutex_waiter_equal(waiter, top_waiter);
++}
++
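
The new rt_mutex_steal() above centralizes the steal decision: a waiter may always take the lock ahead of a lower-priority top waiter, and on the lateral path a non-RT waiter of equal priority may take it too. A userspace toy model of that rule (illustrative only; lower number means higher priority, as in the kernel):

    #include <stdbool.h>

    struct toy_waiter { int prio; bool is_rt; };

    static bool toy_can_steal(struct toy_waiter w, struct toy_waiter top,
                              bool lateral)
    {
            if (w.prio < top.prio)          /* strictly higher priority */
                    return true;
            if (!lateral || w.is_rt)        /* RT tasks never steal laterally */
                    return false;
            return w.prio == top.prio;      /* equal priority, lateral mode */
    }
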
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
-@@ -253,7 +296,7 @@ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
-- if (rt_mutex_waiter_less(waiter, entry)) {
-+ if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
- link = &parent->rb_left;
- } else {
- link = &parent->rb_right;
-@@ -292,7 +335,7 @@ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
- while (*link) {
- parent = *link;
- entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
-- if (rt_mutex_waiter_less(waiter, entry)) {
-+ if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) {
- link = &parent->rb_left;
- } else {
- link = &parent->rb_right;
-@@ -320,72 +363,16 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+@@ -320,72 +378,16 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
@@ -14113,24 +14465,19 @@ index 2c49d76f96c3..218f1d26afe7 100644
- * the waiter is not allowed to do priority boosting
- */
-int rt_mutex_getprio(struct task_struct *task)
-+static void rt_mutex_adjust_prio(struct task_struct *p)
- {
+-{
- if (likely(!task_has_pi_waiters(task)))
- return task->normal_prio;
-+ struct task_struct *pi_task = NULL;
-
+-
- return min(task_top_pi_waiter(task)->prio,
- task->normal_prio);
-}
-+ lockdep_assert_held(&p->pi_lock);
-
+-
-struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
-{
- if (likely(!task_has_pi_waiters(task)))
- return NULL;
-+ if (task_has_pi_waiters(p))
-+ pi_task = task_top_pi_waiter(p)->task;
-
+-
- return task_top_pi_waiter(task)->task;
-}
-
@@ -14154,13 +14501,16 @@ index 2c49d76f96c3..218f1d26afe7 100644
- * This can be both boosting and unboosting. task->pi_lock must be held.
- */
-static void __rt_mutex_adjust_prio(struct task_struct *task)
--{
++static void rt_mutex_adjust_prio(struct task_struct *p)
+ {
- int prio = rt_mutex_getprio(task);
--
++ struct task_struct *pi_task = NULL;
+
- if (task->prio != prio || dl_prio(prio))
- rt_mutex_setprio(task, prio);
-}
--
++ lockdep_assert_held(&p->pi_lock);
+
-/*
- * Adjust task priority (undo boosting). Called from the exit path of
- * rt_mutex_slowunlock() and rt_mutex_slowlock().
@@ -14173,7 +14523,9 @@ index 2c49d76f96c3..218f1d26afe7 100644
-void rt_mutex_adjust_prio(struct task_struct *task)
-{
- unsigned long flags;
--
++ if (task_has_pi_waiters(p))
++ pi_task = task_top_pi_waiter(p)->task;
+
- raw_spin_lock_irqsave(&task->pi_lock, flags);
- __rt_mutex_adjust_prio(task);
- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -14181,7 +14533,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/*
-@@ -414,6 +401,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+@@ -414,6 +416,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -14196,7 +14548,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -421,7 +416,8 @@ int max_lock_depth = 1024;
+@@ -421,7 +431,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -14206,7 +14558,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/*
-@@ -557,7 +553,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -557,7 +568,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -14215,7 +14567,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
goto out_unlock_pi;
/*
-@@ -608,7 +604,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -608,7 +619,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* enabled we continue, but stop the requeueing in the chain
* walk.
*/
@@ -14224,7 +14576,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
if (!detect_deadlock)
goto out_unlock_pi;
else
-@@ -704,7 +700,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -704,7 +715,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* [7] Requeue the waiter in the lock waiter tree. */
rt_mutex_dequeue(lock, waiter);
@@ -14251,7 +14603,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
rt_mutex_enqueue(lock, waiter);
/* [8] Release the task */
-@@ -719,13 +734,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -719,13 +749,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -14270,7 +14622,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -745,7 +763,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -745,7 +778,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
*/
rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
rt_mutex_enqueue_pi(task, waiter);
@@ -14279,7 +14631,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
} else if (prerequeue_top_waiter == waiter) {
/*
-@@ -761,7 +779,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -761,7 +794,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
rt_mutex_dequeue_pi(task, waiter);
waiter = rt_mutex_top_waiter(lock);
rt_mutex_enqueue_pi(task, waiter);
@@ -14288,7 +14640,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
} else {
/*
* Nothing changed. No need to do any priority
-@@ -818,6 +836,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -818,6 +851,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
return ret;
}
@@ -14296,9 +14648,11 @@ index 2c49d76f96c3..218f1d26afe7 100644
/*
* Try to take an rt-mutex
*
-@@ -828,9 +847,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+@@ -827,10 +861,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * @task: The task which wants to acquire the lock
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
++ * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
*/
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter)
@@ -14311,48 +14665,41 @@ index 2c49d76f96c3..218f1d26afe7 100644
/*
* Before testing whether we can acquire @lock, we set the
* RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
-@@ -866,8 +888,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- * If waiter is not the highest priority waiter of
- * @lock, give up.
+@@ -863,12 +901,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ */
+ if (waiter) {
+ /*
+- * If waiter is not the highest priority waiter of
+- * @lock, give up.
++ * If waiter is not the highest priority waiter of @lock,
++ * or its peer when lateral steal is allowed, give up.
*/
- if (waiter != rt_mutex_top_waiter(lock))
-+ if (waiter != rt_mutex_top_waiter(lock)) {
-+ /* XXX rt_mutex_waiter_less() ? */
++ if (!rt_mutex_steal(lock, waiter, mode))
return 0;
-+ }
-
+-
/*
* We can acquire the lock. Remove the waiter from the
-@@ -885,14 +909,26 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- * not need to be dequeued.
+ * lock waiters tree.
+@@ -886,13 +923,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
*/
if (rt_mutex_has_waiters(lock)) {
-+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
-+
-+ if (task != pown)
-+ return 0;
-+
-+ /*
-+ * Note that RT tasks are excluded from lateral-steals
-+ * to prevent the introduction of an unbounded latency.
-+ */
-+ if (rt_task(task))
-+ mode = STEAL_NORMAL;
/*
- * If @task->prio is greater than or equal to
- * the top waiter priority (kernel view),
- * @task lost.
+- * If @task->prio is greater than or equal to
+- * the top waiter priority (kernel view),
+- * @task lost.
++ * If @task->prio is greater than the top waiter
++ * priority (kernel view), or equal to it when a
++ * lateral steal is forbidden, @task lost.
*/
- if (task->prio >= rt_mutex_top_waiter(lock)->prio)
-+ if (!rt_mutex_waiter_less(task_to_waiter(task),
-+ rt_mutex_top_waiter(lock),
-+ mode))
++ if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
return 0;
-
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -936,11 +972,384 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+@@ -936,177 +972,589 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
*/
rt_mutex_set_owner(lock, task);
@@ -14362,25 +14709,58 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
+ /*
+- * Task blocks on lock.
+- *
+- * Prepare waiter and propagate pi chain
+- *
+- * This must be called with lock->wait_lock held and interrupts disabled
+ * preemptible spin_lock functions:
-+ */
+ */
+-static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+- struct rt_mutex_waiter *waiter,
+- struct task_struct *task,
+- enum rtmutex_chainwalk chwalk)
+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock,
+ bool mg_off),
+ bool do_mig_dis)
-+{
+ {
+- struct task_struct *owner = rt_mutex_owner(lock);
+- struct rt_mutex_waiter *top_waiter = waiter;
+- struct rt_mutex *next_lock;
+- int chain_walk = 0, res;
+ might_sleep_no_state_check();
-+
+
+- /*
+- * Early deadlock detection. We really don't want the task to
+- * enqueue on itself just to untangle the mess later. It's not
+- * only an optimization. We drop the locks, so another waiter
+- * can come in before the chain walk detects the deadlock. So
+- * the other will detect the deadlock and return -EDEADLOCK,
+- * which is wrong, as the other waiter is not in a deadlock
+- * situation.
+- */
+- if (owner == task)
+- return -EDEADLK;
+ if (do_mig_dis)
+ migrate_disable();
-+
+
+- raw_spin_lock(&task->pi_lock);
+- __rt_mutex_adjust_prio(task);
+- waiter->task = task;
+- waiter->lock = lock;
+- waiter->prio = task->prio;
+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return;
+ else
+ slowfn(lock, do_mig_dis);
+}
-+
+
+- /* Get the top priority waiter on the lock */
+- if (rt_mutex_has_waiters(lock))
+- top_waiter = rt_mutex_top_waiter(lock);
+- rt_mutex_enqueue(lock, waiter);
+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
@@ -14398,7 +14778,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ struct task_struct *owner)
+{
+ int res = 0;
-+
+
+- task->pi_blocked_on = waiter;
+ rcu_read_lock();
+ for (;;) {
+ if (owner != rt_mutex_owner(lock))
@@ -14424,7 +14805,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ return 1;
+}
+#endif
-+
+
+- raw_spin_unlock(&task->pi_lock);
+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task,
@@ -14443,44 +14825,85 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ struct rt_mutex_waiter waiter, *top_waiter;
+ unsigned long flags;
+ int ret;
-+
+
+- if (!owner)
+- return 0;
+ rt_mutex_init_waiter(&waiter, true);
-+
+
+- raw_spin_lock(&owner->pi_lock);
+- if (waiter == rt_mutex_top_waiter(lock)) {
+- rt_mutex_dequeue_pi(owner, top_waiter);
+- rt_mutex_enqueue_pi(owner, waiter);
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+
+
+- __rt_mutex_adjust_prio(owner);
+- if (owner->pi_blocked_on)
+- chain_walk = 1;
+- } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+- chain_walk = 1;
+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return;
-+ }
-+
+ }
+
+- /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
+ BUG_ON(rt_mutex_owner(lock) == self);
-+
-+ /*
+
+- raw_spin_unlock(&owner->pi_lock);
+ /*
+- * Even if full deadlock detection is on, if the owner is not
+- * blocked itself, we can avoid finding this out in the chain
+- * walk.
+ * We save whatever state the task is in and we'll restore it
+ * after acquiring the lock taking real wakeups into account
+ * as well. We are serialized via pi_lock against wakeups. See
+ * try_to_wake_up().
-+ */
+ */
+- if (!chain_walk || !next_lock)
+- return 0;
+ raw_spin_lock(&self->pi_lock);
+ self->saved_state = self->state;
+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+ raw_spin_unlock(&self->pi_lock);
-+
+
+- /*
+- * The owner can't disappear while holding a lock,
+- * so the owner struct is protected by wait_lock.
+- * Gets dropped in rt_mutex_adjust_prio_chain()!
+- */
+- get_task_struct(owner);
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
+ BUG_ON(ret);
-+
+
+- raw_spin_unlock_irq(&lock->wait_lock);
+ for (;;) {
+ /* Try to acquire the lock again. */
+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
+ break;
-+
+
+- res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
+- next_lock, waiter, task);
+ top_waiter = rt_mutex_top_waiter(lock);
+ lock_owner = rt_mutex_owner(lock);
-+
+
+- raw_spin_lock_irq(&lock->wait_lock);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+
+
+- return res;
+-}
+ debug_rt_mutex_print_deadlock(&waiter);
-+
+
+-/*
+- * Remove the top waiter from the current tasks pi waiter tree and
+- * queue it up.
+- *
+- * Called with lock->wait_lock held and interrupts disabled.
+- */
+-static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+- struct rt_mutex *lock)
+-{
+- struct rt_mutex_waiter *waiter;
+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
+ if (mg_off)
+ migrate_enable();
@@ -14488,87 +14911,129 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ if (mg_off)
+ migrate_disable();
+ }
-+
+
+- raw_spin_lock(&current->pi_lock);
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+
+
+- waiter = rt_mutex_top_waiter(lock);
+ raw_spin_lock(&self->pi_lock);
+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+ raw_spin_unlock(&self->pi_lock);
+ }
-+
-+ /*
+
+ /*
+- * Remove it from current->pi_waiters. We do not adjust a
+- * possible priority boost right now. We execute wakeup in the
+- * boosted mode and go back to normal after releasing
+- * lock->wait_lock.
+ * Restore the task state to current->saved_state. We set it
+ * to the original state above and the try_to_wake_up() code
+ * has possibly updated it when a real (non-rtmutex) wakeup
+ * happened while we were blocked. Clear saved_state so
+ * try_to_wakeup() does not get confused.
-+ */
+ */
+- rt_mutex_dequeue_pi(current, waiter);
+ raw_spin_lock(&self->pi_lock);
+ __set_current_state_no_track(self->saved_state);
+ self->saved_state = TASK_RUNNING;
+ raw_spin_unlock(&self->pi_lock);
-+
-+ /*
+
+ /*
+- * As we are waking up the top waiter, and the waiter stays
+- * queued on the lock until it gets the lock, this lock
+- * obviously has waiters. Just set the bit here and this has
+- * the added benefit of forcing all new tasks into the
+- * slow path making sure no task of lower priority than
+- * the top waiter can steal this lock.
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up:
-+ */
+ */
+- lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+ fixup_rt_mutex_waiters(lock);
-+
+
+- raw_spin_unlock(&current->pi_lock);
+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
-+
+
+- wake_q_add(wake_q, waiter->task);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ debug_rt_mutex_free_waiter(&waiter);
-+}
-+
+ }
+
+static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
+ struct wake_q_head *wake_q,
+ struct wake_q_head *wq_sleeper);
-+/*
+ /*
+- * Remove a waiter from a lock and give up
+- *
+- * Must be called with lock->wait_lock held and interrupts disabled. I must
+- * have just failed to try_to_take_rt_mutex().
+ * Slow path to release a rt_mutex spin_lock style
-+ */
+ */
+-static void remove_waiter(struct rt_mutex *lock,
+- struct rt_mutex_waiter *waiter)
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
-+{
+ {
+- bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
+- struct task_struct *owner = rt_mutex_owner(lock);
+- struct rt_mutex *next_lock;
+ unsigned long flags;
+ WAKE_Q(wake_q);
+ WAKE_Q(wake_sleeper_q);
+ bool postunlock;
-+
+
+- raw_spin_lock(&current->pi_lock);
+- rt_mutex_dequeue(lock, waiter);
+- current->pi_blocked_on = NULL;
+- raw_spin_unlock(&current->pi_lock);
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+
+
+- /*
+- * Only update priority if the waiter was the highest priority
+- * waiter of the lock and there is an owner to update.
+- */
+- if (!owner || !is_top_waiter)
+- return;
+ if (postunlock)
+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+}
-+
+
+- raw_spin_lock(&owner->pi_lock);
+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock__no_mg);
-+
+
+- rt_mutex_dequeue_pi(owner, waiter);
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock);
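
rt_spin_lock() and its __no_mg variant are the PREEMPT_RT_FULL implementations behind spin_lock(); both funnel through rt_spin_lock_fastlock(), which tries a lockless cmpxchg on the owner word before entering the slowpath. A userspace toy of that fastpath/slowpath split (illustrative; the kernel uses rt_mutex_cmpxchg_acquire()):

    #include <stdatomic.h>
    #include <stddef.h>

    struct toy_lock { _Atomic(void *) owner; };

    static void toy_lock_acquire(struct toy_lock *l, void *self,
                                 void (*slowpath)(struct toy_lock *))
    {
            void *expected = NULL;

            /* Uncontended fastpath: atomically claim a NULL owner. */
            if (atomic_compare_exchange_strong(&l->owner, &expected, self))
                    return;

            /* Contended: enqueue as a waiter and sleep. */
            slowpath(l);
    }
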
-+
+
+- if (rt_mutex_has_waiters(lock))
+- rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
+}
+EXPORT_SYMBOL(__rt_spin_lock);
-+
+
+- __rt_mutex_adjust_prio(owner);
+void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
+{
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
+}
+EXPORT_SYMBOL(__rt_spin_lock__no_mg);
-+
+
+- /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
@@ -14577,7 +15042,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
-+
+
+- raw_spin_unlock(&owner->pi_lock);
+void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
@@ -14585,7 +15051,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(rt_spin_unlock__no_mg);
-+
+
+- /*
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
@@ -14736,23 +15203,38 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
+}
+
- /*
- * Task blocks on lock.
- *
-@@ -958,6 +1367,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- struct rt_mutex *next_lock;
- int chain_walk = 0, res;
-
++/*
++ * Task blocks on lock.
++ *
++ * Prepare waiter and propagate pi chain
++ *
++ * This must be called with lock->wait_lock held and interrupts disabled
++ */
++static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task,
++ enum rtmutex_chainwalk chwalk)
++{
++ struct task_struct *owner = rt_mutex_owner(lock);
++ struct rt_mutex_waiter *top_waiter = waiter;
++ struct rt_mutex *next_lock;
++ int chain_walk = 0, res;
++
+ lockdep_assert_held(&lock->wait_lock);
+
- /*
- * Early deadlock detection. We really don't want the task to
- * enqueue on itself just to untangle the mess later. It's not
-@@ -971,10 +1382,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- return -EDEADLK;
-
- raw_spin_lock(&task->pi_lock);
-- __rt_mutex_adjust_prio(task);
++ /*
++ * Early deadlock detection. We really don't want the task to
++ * enqueue on itself just to untangle the mess later. It's not
++ * only an optimization. We drop the locks, so another waiter
++ * can come in before the chain walk detects the deadlock. So
++ * the other will detect the deadlock and return -EDEADLOCK,
++ * which is wrong, as the other waiter is not in a deadlock
++ * situation.
++ */
++ if (owner == task)
++ return -EDEADLK;
++
++ raw_spin_lock(&task->pi_lock);
+
+ /*
+ * In the case of futex requeue PI, this will be a proxy
@@ -14771,55 +15253,100 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
+ rt_mutex_adjust_prio(task);
- waiter->task = task;
- waiter->lock = lock;
- waiter->prio = task->prio;
++ waiter->task = task;
++ waiter->lock = lock;
++ waiter->prio = task->prio;
+ waiter->deadline = task->dl.deadline;
-
- /* Get the top priority waiter on the lock */
- if (rt_mutex_has_waiters(lock))
-@@ -993,8 +1422,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- rt_mutex_dequeue_pi(owner, top_waiter);
- rt_mutex_enqueue_pi(owner, waiter);
-
-- __rt_mutex_adjust_prio(owner);
-- if (owner->pi_blocked_on)
++
++ /* Get the top priority waiter on the lock */
++ if (rt_mutex_has_waiters(lock))
++ top_waiter = rt_mutex_top_waiter(lock);
++ rt_mutex_enqueue(lock, waiter);
++
++ task->pi_blocked_on = waiter;
++
++ raw_spin_unlock(&task->pi_lock);
++
++ if (!owner)
++ return 0;
++
++ raw_spin_lock(&owner->pi_lock);
++ if (waiter == rt_mutex_top_waiter(lock)) {
++ rt_mutex_dequeue_pi(owner, top_waiter);
++ rt_mutex_enqueue_pi(owner, waiter);
++
+ rt_mutex_adjust_prio(owner);
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
- chain_walk = 1;
- } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
- chain_walk = 1;
-@@ -1036,6 +1465,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- * Called with lock->wait_lock held and interrupts disabled.
- */
- static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
++ chain_walk = 1;
++ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
++ chain_walk = 1;
++ }
++
++ /* Store the lock on which owner is blocked or NULL */
++ next_lock = task_blocked_on_lock(owner);
++
++ raw_spin_unlock(&owner->pi_lock);
++ /*
++ * Even if full deadlock detection is on, if the owner is not
++ * blocked itself, we can avoid finding this out in the chain
++ * walk.
++ */
++ if (!chain_walk || !next_lock)
++ return 0;
++
++ /*
++ * The owner can't disappear while holding a lock,
++ * so the owner struct is protected by wait_lock.
++ * Gets dropped in rt_mutex_adjust_prio_chain()!
++ */
++ get_task_struct(owner);
++
++ raw_spin_unlock_irq(&lock->wait_lock);
++
++ res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
++ next_lock, waiter, task);
++
++ raw_spin_lock_irq(&lock->wait_lock);
++
++ return res;
++}
++
++/*
++ * Remove the top waiter from the current tasks pi waiter tree and
++ * queue it up.
++ *
++ * Called with lock->wait_lock held and interrupts disabled.
++ */
++static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+ struct wake_q_head *wake_sleeper_q,
- struct rt_mutex *lock)
- {
- struct rt_mutex_waiter *waiter;
-@@ -1045,12 +1475,14 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
- waiter = rt_mutex_top_waiter(lock);
-
- /*
-- * Remove it from current->pi_waiters. We do not adjust a
-- * possible priority boost right now. We execute wakeup in the
-- * boosted mode and go back to normal after releasing
-- * lock->wait_lock.
++ struct rt_mutex *lock)
++{
++ struct rt_mutex_waiter *waiter;
++
++ raw_spin_lock(&current->pi_lock);
++
++ waiter = rt_mutex_top_waiter(lock);
++
++ /*
+ * Remove it from current->pi_waiters and deboost.
+ *
+ * We must in fact deboost here in order to ensure we call
+ * rt_mutex_setprio() to update p->pi_top_task before the
+ * task unblocks.
- */
- rt_mutex_dequeue_pi(current, waiter);
++ */
++ rt_mutex_dequeue_pi(current, waiter);
+ rt_mutex_adjust_prio(current);
-
- /*
- * As we are waking up the top waiter, and the waiter stays
-@@ -1062,9 +1494,22 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
- */
- lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
-
++
++ /*
++ * As we are waking up the top waiter, and the waiter stays
++ * queued on the lock until it gets the lock, this lock
++ * obviously has waiters. Just set the bit here and this has
++ * the added benefit of forcing all new tasks into the
++ * slow path making sure no task of lower priority than
++ * the top waiter can steal this lock.
++ */
++ lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
++
+ /*
+ * We deboosted before waking the top waiter task such that we don't
+ * run two tasks with the 'same' priority (and ensure the
@@ -14832,40 +15359,58 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ */
+ preempt_disable();
+ if (waiter->savestate)
-+ wake_q_add(wake_sleeper_q, waiter->task);
++ wake_q_add_sleeper(wake_sleeper_q, waiter->task);
+ else
+ wake_q_add(wake_q, waiter->task);
- raw_spin_unlock(&current->pi_lock);
--
-- wake_q_add(wake_q, waiter->task);
- }
-
- /*
-@@ -1078,7 +1523,9 @@ static void remove_waiter(struct rt_mutex *lock,
- {
- bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
- struct task_struct *owner = rt_mutex_owner(lock);
-- struct rt_mutex *next_lock;
++ raw_spin_unlock(&current->pi_lock);
++}
++
++/*
++ * Remove a waiter from a lock and give up
++ *
++ * Must be called with lock->wait_lock held and interrupts disabled. I must
++ * have just failed to try_to_take_rt_mutex().
++ */
++static void remove_waiter(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter)
++{
++ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
++ struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex *next_lock = NULL;
+
+ lockdep_assert_held(&lock->wait_lock);
-
- raw_spin_lock(&current->pi_lock);
- rt_mutex_dequeue(lock, waiter);
-@@ -1099,10 +1546,11 @@ static void remove_waiter(struct rt_mutex *lock,
- if (rt_mutex_has_waiters(lock))
- rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
-
-- __rt_mutex_adjust_prio(owner);
++
++ raw_spin_lock(&current->pi_lock);
++ rt_mutex_dequeue(lock, waiter);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock(&current->pi_lock);
++
++ /*
++ * Only update priority if the waiter was the highest priority
++ * waiter of the lock and there is an owner to update.
++ */
++ if (!owner || !is_top_waiter)
++ return;
++
++ raw_spin_lock(&owner->pi_lock);
++
++ rt_mutex_dequeue_pi(owner, waiter);
++
++ if (rt_mutex_has_waiters(lock))
++ rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
++
+ rt_mutex_adjust_prio(owner);
-
- /* Store the lock on which owner is blocked or NULL */
-- next_lock = task_blocked_on_lock(owner);
++
++ /* Store the lock on which owner is blocked or NULL */
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ next_lock = task_blocked_on_lock(owner);
-
- raw_spin_unlock(&owner->pi_lock);
-
++
++ raw_spin_unlock(&owner->pi_lock);
++
++ /*
+ * Don't walk the chain, if the owner task is not blocked
+ * itself.
+ */
@@ -1138,21 +1586,30 @@ void rt_mutex_adjust_pi(struct task_struct *task)
raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -14938,10 +15483,17 @@ index 2c49d76f96c3..218f1d26afe7 100644
if (ret)
break;
}
-@@ -1223,21 +1682,148 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+@@ -1223,35 +1682,94 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
}
}
+-/*
+- * Slow path lock function:
+- */
+-static int __sched
+-rt_mutex_slowlock(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk)
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+ struct ww_acquire_ctx *ww_ctx)
+{
@@ -14985,21 +15537,34 @@ index 2c49d76f96c3..218f1d26afe7 100644
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+ struct ww_acquire_ctx *ww_ctx)
-+{
+ {
+- struct rt_mutex_waiter waiter;
+- unsigned long flags;
+- int ret = 0;
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+ struct rt_mutex_waiter *waiter, *n;
-+
+
+- debug_rt_mutex_init_waiter(&waiter);
+- RB_CLEAR_NODE(&waiter.pi_tree_entry);
+- RB_CLEAR_NODE(&waiter.tree_entry);
+ /*
+ * This branch gets optimized out for the common case,
+ * and is only important for ww_mutex_lock.
+ */
+ ww_mutex_lock_acquired(ww, ww_ctx);
+ ww->ctx = ww_ctx;
-+
-+ /*
+
+ /*
+- * Technically we could use raw_spin_[un]lock_irq() here, but this can
+- * be called in early boot if the cmpxchg() fast path is disabled
+- * (debug, no architecture support). In this case we will acquire the
+- * rtmutex with lock->wait_lock held. But we cannot unconditionally
+- * enable interrupts in that early boot case. So we need to use the
+- * irqsave/restore variants.
+ * Give any possible sleeping processes the chance to wake up,
+ * so they can recheck if they have to back off.
-+ */
+ */
+- raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
+ tree_entry) {
+ /* XXX debug rt mutex waiter wakeup */
@@ -15025,24 +15590,26 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ struct rt_mutex_waiter *waiter)
+{
+ int ret;
-+
-+ /* Try to acquire the lock again: */
-+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ if (ww_ctx)
+ ww_mutex_account_lock(lock, ww_ctx);
-+ return 0;
-+ }
-+
-+ set_current_state(state);
-+
-+ /* Setup the timer, when timeout != NULL */
-+ if (unlikely(timeout))
-+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-+
+ return 0;
+ }
+
+@@ -1261,17 +1779,27 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ if (unlikely(timeout))
+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+
+- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-+
+
+- if (likely(!ret))
+ if (likely(!ret)) {
-+ /* sleep on the mutex */
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+ ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
+ ww_ctx);
+ } else if (ww_ctx) {
@@ -15050,81 +15617,53 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ ret = __mutex_lock_check_stamp(lock, ww_ctx);
+ BUG_ON(!ret);
+ }
-+
-+ if (unlikely(ret)) {
-+ __set_current_state(TASK_RUNNING);
-+ if (rt_mutex_has_waiters(lock))
+
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+ if (rt_mutex_has_waiters(lock))
+- remove_waiter(lock, &waiter);
+- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+ remove_waiter(lock, waiter);
+ /* ww_mutex want to report EDEADLK/EALREADY, let them */
+ if (!ww_ctx)
+ rt_mutex_handle_deadlock(ret, chwalk, waiter);
+ } else if (ww_ctx) {
+ ww_mutex_account_lock(lock, ww_ctx);
-+ }
-+
-+ /*
-+ * try_to_take_rt_mutex() sets the waiter bit
-+ * unconditionally. We might have to fix that up.
-+ */
-+ fixup_rt_mutex_waiters(lock);
+ }
+
+ /*
+@@ -1279,6 +1807,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ * unconditionally. We might have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
+ return ret;
+}
+
- /*
- * Slow path lock function:
- */
- static int __sched
- rt_mutex_slowlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk)
++/*
++ * Slow path lock function:
++ */
++static int __sched
++rt_mutex_slowlock(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
+ struct ww_acquire_ctx *ww_ctx)
- {
- struct rt_mutex_waiter waiter;
- unsigned long flags;
- int ret = 0;
-
-- debug_rt_mutex_init_waiter(&waiter);
-- RB_CLEAR_NODE(&waiter.pi_tree_entry);
-- RB_CLEAR_NODE(&waiter.tree_entry);
++{
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
++ int ret = 0;
++
+ rt_mutex_init_waiter(&waiter, false);
-
- /*
- * Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1249,36 +1835,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
- */
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
-- /* Try to acquire the lock again: */
-- if (try_to_take_rt_mutex(lock, current, NULL)) {
-- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-- return 0;
-- }
--
-- set_current_state(state);
--
-- /* Setup the timer, when timeout != NULL */
-- if (unlikely(timeout))
-- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
--
-- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
--
-- if (likely(!ret))
-- /* sleep on the mutex */
-- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
--
-- if (unlikely(ret)) {
-- __set_current_state(TASK_RUNNING);
-- if (rt_mutex_has_waiters(lock))
-- remove_waiter(lock, &waiter);
-- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-- }
--
-- /*
-- * try_to_take_rt_mutex() sets the waiter bit
-- * unconditionally. We might have to fix that up.
-- */
-- fixup_rt_mutex_waiters(lock);
++
++ /*
++ * Technically we could use raw_spin_[un]lock_irq() here, but this can
++ * be called in early boot if the cmpxchg() fast path is disabled
++ * (debug, no architecture support). In this case we will acquire the
++ * rtmutex with lock->wait_lock held. But we cannot unconditionally
++ * enable interrupts in that early boot case. So we need to use the
++ * irqsave/restore variants.
++ */
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
+ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
+ &waiter);
@@ -15169,7 +15708,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/*
-@@ -1403,63 +1959,85 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+@@ -1403,63 +1959,97 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -15187,6 +15726,15 @@ index 2c49d76f96c3..218f1d26afe7 100644
- } else
- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+
++ /*
++ * If rt_mutex blocks, the function sched_submit_work will not call
++ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
++ * We must call blk_schedule_flush_plug here, if we don't call it,
++ * a deadlock in device mapper may happen.
++ */
++ if (unlikely(blk_needs_flush_plug(current)))
++ blk_schedule_flush_plug(current);
++
+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
}
@@ -15209,6 +15757,9 @@ index 2c49d76f96c3..218f1d26afe7 100644
- } else
- return slowfn(lock, state, timeout, chwalk);
+
++ if (unlikely(blk_needs_flush_plug(current)))
++ blk_schedule_flush_plug(current);
++
+ return slowfn(lock, state, timeout, chwalk, ww_ctx);
}
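
The two hunks above add the blk_schedule_flush_plug() calls whose rationale the in-code comment gives. The scheduler-side logic they compensate for looks roughly like this (simplified from kernel/sched/core.c; shown for context, not part of this patch):

    static inline void sched_submit_work(struct task_struct *tsk)
    {
            if (!tsk->state || tsk_is_pi_blocked(tsk))
                    return;         /* rtmutex sleepers skip the flush... */

            /* ...so the rtmutex slowpath entry must flush the plug itself. */
            if (blk_needs_flush_plug(tsk))
                    blk_schedule_flush_plug(tsk);
    }
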
@@ -15278,7 +15829,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/**
-@@ -1469,15 +2047,13 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
+@@ -1469,15 +2059,13 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
@@ -15296,7 +15847,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
* @lock: the rt_mutex to be locked
*
* Returns:
-@@ -1486,23 +2062,32 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -1486,23 +2074,32 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -15340,7 +15891,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/**
-@@ -1525,6 +2110,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
+@@ -1525,6 +2122,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -15348,7 +15899,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -1542,7 +2128,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1542,7 +2140,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
@@ -15360,26 +15911,14 @@ index 2c49d76f96c3..218f1d26afe7 100644
return 0;
return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
-@@ -1560,21 +2150,53 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
+@@ -1560,21 +2162,53 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
--/**
-- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
-- * @lock: the rt_mutex to be unlocked
-- *
-- * Returns: true/false indicating whether priority adjustment is
-- * required or not.
-- */
--bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
-- struct wake_q_head *wqh)
+static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
+ struct wake_q_head *wake_q,
+ struct wake_q_head *wq_sleeper)
- {
-- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
-- rt_mutex_deadlock_account_unlock(current);
-- return false;
++{
+ lockdep_assert_held(&lock->wait_lock);
+
+ debug_rt_mutex_unlock(lock);
@@ -15387,8 +15926,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ return false; /* done */
- }
-- return rt_mutex_slowunlock(lock, wqh);
++ }
+
+ /*
+ * We've already deboosted, mark_wakeup_next_waiter() will
@@ -15401,14 +15939,26 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ return true; /* call postunlock() */
+}
+
-+/**
+ /**
+- * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+- * @lock: the rt_mutex to be unlocked
+- *
+- * Returns: true/false indicating whether priority adjustment is
+- * required or not.
+ * Futex variant, that since futex variants do not use the fast-path, can be
+ * simple and will not need to retry.
-+ */
+ */
+-bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh)
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ struct wake_q_head *wake_q,
+ struct wake_q_head *wq_sleeper)
-+{
+ {
+- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+- rt_mutex_deadlock_account_unlock(current);
+- return false;
+- }
+- return rt_mutex_slowunlock(lock, wqh);
+ return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
+}
+
@@ -15427,7 +15977,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/**
-@@ -1607,13 +2229,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1607,13 +2241,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -15442,7 +15992,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1628,10 +2249,9 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1628,10 +2261,9 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -15454,21 +16004,39 @@ index 2c49d76f96c3..218f1d26afe7 100644
}
/**
-@@ -1647,7 +2267,66 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+@@ -1647,34 +2279,44 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
- rt_mutex_deadlock_account_unlock(proxy_owner);
-+}
-+
+ }
+
+-/**
+- * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+- * @lock: the rt_mutex to take
+- * @waiter: the pre-initialized rt_mutex_waiter
+- * @task: the task to prepare
+- *
+- * Returns:
+- * 0 - task blocked on lock
+- * 1 - acquired the lock for task, caller should wake it up
+- * <0 - error
+- *
+- * Special API call for FUTEX_REQUEUE_PI support.
+- */
+-int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
-+ struct rt_mutex_waiter *waiter,
-+ struct task_struct *task)
-+{
-+ int ret;
-+
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task)
+ {
+ int ret;
+
+- raw_spin_lock_irq(&lock->wait_lock);
+-
+- if (try_to_take_rt_mutex(lock, task, NULL)) {
+- raw_spin_unlock_irq(&lock->wait_lock);
+ if (try_to_take_rt_mutex(lock, task, NULL))
-+ return 1;
+ return 1;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
@@ -15492,72 +16060,59 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ raw_spin_lock(&task->pi_lock);
+ if (task->pi_blocked_on) {
+ raw_spin_unlock(&task->pi_lock);
-+ raw_spin_unlock_irq(&lock->wait_lock);
+ return -EAGAIN;
-+ }
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+ raw_spin_unlock(&task->pi_lock);
+#endif
-+
-+ /* We enforce deadlock detection for futexes */
-+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
-+ RT_MUTEX_FULL_CHAINWALK);
-+
-+ if (ret && !rt_mutex_owner(lock)) {
-+ /*
-+ * Reset the return value. We might have
-+ * returned with -EDEADLK and the owner
-+ * released the lock while we were walking the
-+ * pi chain. Let the waiter sort it out.
-+ */
-+ ret = 0;
-+ }
-+
-+ if (ret && rt_mutex_has_waiters(lock))
-+ remove_waiter(lock, waiter);
-+
-+ debug_rt_mutex_print_deadlock(waiter);
-+
-+ return ret;
- }
- /**
-@@ -1670,33 +2349,9 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
- int ret;
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
+@@ -1690,16 +2332,40 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ ret = 0;
+ }
- raw_spin_lock_irq(&lock->wait_lock);
--
-- if (try_to_take_rt_mutex(lock, task, NULL)) {
-- raw_spin_unlock_irq(&lock->wait_lock);
-- return 1;
-- }
--
-- /* We enforce deadlock detection for futexes */
-- ret = task_blocks_on_rt_mutex(lock, waiter, task,
-- RT_MUTEX_FULL_CHAINWALK);
--
-- if (ret && !rt_mutex_owner(lock)) {
-- /*
-- * Reset the return value. We might have
-- * returned with -EDEADLK and the owner
-- * released the lock while we were walking the
-- * pi chain. Let the waiter sort it out.
-- */
-- ret = 0;
-- }
--
- if (unlikely(ret))
-- remove_waiter(lock, waiter);
--
-+ ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
- raw_spin_unlock_irq(&lock->wait_lock);
++ if (ret && rt_mutex_has_waiters(lock))
+ remove_waiter(lock, waiter);
-- debug_rt_mutex_print_deadlock(waiter);
+- raw_spin_unlock_irq(&lock->wait_lock);
-
+ debug_rt_mutex_print_deadlock(waiter);
+
return ret;
}
-@@ -1721,36 +2376,106 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
++/**
++ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
++ * @lock: the rt_mutex to take
++ * @waiter: the pre-initialized rt_mutex_waiter
++ * @task: the task to prepare
++ *
++ * Returns:
++ * 0 - task blocked on lock
++ * 1 - acquired the lock for task, caller should wake it up
++ * <0 - error
++ *
++ * Special API call for FUTEX_REQUEUE_PI support.
++ */
++int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task)
++{
++ int ret;
++
++ raw_spin_lock_irq(&lock->wait_lock);
++ ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
++ raw_spin_unlock_irq(&lock->wait_lock);
++
++ return ret;
++}
++
+ /**
+ * rt_mutex_next_owner - return the next owner of the lock
+ *
+@@ -1721,36 +2387,106 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
}
/**
@@ -15590,11 +16145,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
raw_spin_lock_irq(&lock->wait_lock);
-
-- set_current_state(TASK_INTERRUPTIBLE);
--
- /* sleep on the mutex */
-- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-+ set_current_state(TASK_INTERRUPTIBLE);
++ /* sleep on the mutex */
+ set_current_state(TASK_INTERRUPTIBLE);
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
@@ -15602,7 +16154,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ */
+ fixup_rt_mutex_waiters(lock);
-- if (unlikely(ret))
+- /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+ /*
+ * RT has a problem here when the wait got interrupted by a timeout
+ * or a signal. task->pi_blocked_on is still set. The task must
@@ -15621,7 +16174,9 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ raw_spin_unlock(&tsk->pi_lock);
+ }
+ raw_spin_unlock_irq(&lock->wait_lock);
-+
+
+- if (unlikely(ret))
+- remove_waiter(lock, waiter);
+ return ret;
+}
+
@@ -15648,7 +16203,7 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ struct rt_mutex_waiter *waiter)
+{
+ bool cleanup = false;
-+
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ /*
+ * Do an unconditional try-lock, this deals with the lock stealing
@@ -15667,14 +16222,13 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ * So check if we became owner, if not, take us off the wait_list.
+ */
+ if (rt_mutex_owner(lock) != current) {
- remove_waiter(lock, waiter);
--
++ remove_waiter(lock, waiter);
+ cleanup = true;
+ }
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -1759,5 +2484,91 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+@@ -1759,5 +2495,91 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
raw_spin_unlock_irq(&lock->wait_lock);
@@ -15722,8 +16276,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ else if (!ret && ww_ctx->acquired > 1)
+ return ww_mutex_deadlock_injection(lock, ww_ctx);
+
- return ret;
- }
++ return ret;
++}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+
+int __sched
@@ -15740,8 +16294,8 @@ index 2c49d76f96c3..218f1d26afe7 100644
+ else if (!ret && ww_ctx->acquired > 1)
+ return ww_mutex_deadlock_injection(lock, ww_ctx);
+
-+ return ret;
-+}
+ return ret;
+ }
+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
@@ -16160,7 +16714,7 @@ index db3ccb1dd614..909779647bd1 100644
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
-index 0374a596cffa..94970338d518 100644
+index 9aa0fccd5d43..76d0b40d9193 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
@@ -16179,7 +16733,7 @@ index 0374a596cffa..94970338d518 100644
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
-@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
+@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
arch_spin_unlock(&lock->raw_lock);
}
@@ -16187,7 +16741,7 @@ index 0374a596cffa..94970338d518 100644
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
-@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
@@ -16219,10 +16773,11 @@ index 0e54d5bf0097..f27764fbfa24 100644
continue;
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(mod->percpu, cpu);
-+ void *va = (void *)addr;
-
+-
- if ((void *)addr >= start &&
- (void *)addr < start + mod->percpu_size) {
++ void *va = (void *)addr;
++
+ if (va >= start && va < start + mod->percpu_size) {
+ if (can_addr) {
+ *can_addr = (unsigned long) (va - start);
@@ -16267,7 +16822,7 @@ index 0e54d5bf0097..f27764fbfa24 100644
#define MODINFO_ATTR(field) \
diff --git a/kernel/panic.c b/kernel/panic.c
-index e6480e20379e..7e9c1918a94e 100644
+index dbec387099b1..b67a4803ff2b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -482,9 +482,11 @@ static u64 oops_id;
@@ -16678,10 +17233,10 @@ index 9c5b231684d0..cf15bdb6855b 100644
* console_unblank can no longer be called in interrupt context unless
* oops_in_progress is set to 1..
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
-index a5caecef88be..61e7c5e2183c 100644
+index f39a7be98fc1..583ce3aad891 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -166,7 +166,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+@@ -172,7 +172,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
@@ -16723,7 +17278,7 @@ index bf08fee53dc7..eeb8ce4ad7b6 100644
* Don't even think about trying any of these in real life!!!
* The names includes "busted", and they really means it!
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
-index 10f62c6f48e7..dbee19478f09 100644
+index d1a02877a42c..a7b11a29e03a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -55,6 +55,11 @@
@@ -16830,7 +17385,7 @@ index 10f62c6f48e7..dbee19478f09 100644
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
-@@ -3016,18 +3049,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
+@@ -3026,18 +3059,17 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
@@ -16851,7 +17406,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -3039,19 +3071,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+@@ -3049,19 +3081,106 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -16861,8 +17416,8 @@ index 10f62c6f48e7..dbee19478f09 100644
- }
- invoke_rcu_callbacks_kthread();
+ rcu_do_batch(rsp, rdp);
- }
-
++}
++
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+ /*
@@ -16871,8 +17426,8 @@ index 10f62c6f48e7..dbee19478f09 100644
+ */
+ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
+ wake_up_process(t);
-+}
-+
+ }
+
+/*
+ * Wake up this CPU's rcuc kthread to do RCU core processing.
+ */
@@ -16891,8 +17446,8 @@ index 10f62c6f48e7..dbee19478f09 100644
+ if (t != NULL && current != t)
+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
+ local_irq_restore(flags);
- }
-
++}
++
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
@@ -16901,8 +17456,8 @@ index 10f62c6f48e7..dbee19478f09 100644
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+ return __this_cpu_read(rcu_cpu_has_work);
-+}
-+
+ }
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
@@ -16965,7 +17520,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
*/
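
The tree.c hunks above move RCU core processing from the RCU softirq to a per-CPU "rcuc" kthread that is woken only when there is work. A self-contained pthread sketch of that wake-when-work pattern, with invented names rather than the kernel's smpboot interface:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool has_work, stop;

    static void *rcu_worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!stop) {
            while (!has_work && !stop)
                pthread_cond_wait(&cond, &lock);   /* "should_run" gate */
            if (has_work) {
                has_work = false;
                pthread_mutex_unlock(&lock);
                puts("invoking queued callbacks");  /* rcu_do_batch() stand-in */
                pthread_mutex_lock(&lock);
            }
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void invoke_rcu_core(void)
    {
        /* Instead of raising a softirq: flag work and wake the thread. */
        pthread_mutex_lock(&lock);
        has_work = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, rcu_worker, NULL);
        invoke_rcu_core();
        pthread_mutex_lock(&lock);
        stop = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }
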
-@@ -3195,6 +3314,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+@@ -3205,6 +3324,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -16973,7 +17528,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -3203,6 +3323,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+@@ -3213,6 +3333,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -16981,7 +17536,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3294,6 +3415,7 @@ void synchronize_sched(void)
+@@ -3304,6 +3425,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -16989,7 +17544,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3320,6 +3442,7 @@ void synchronize_rcu_bh(void)
+@@ -3330,6 +3452,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -16997,7 +17552,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3698,6 +3821,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
+@@ -3708,6 +3831,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
mutex_unlock(&rsp->barrier_mutex);
}
@@ -17005,7 +17560,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3706,6 +3830,7 @@ void rcu_barrier_bh(void)
+@@ -3716,6 +3840,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -17013,7 +17568,7 @@ index 10f62c6f48e7..dbee19478f09 100644
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4227,12 +4352,13 @@ void __init rcu_init(void)
+@@ -4237,12 +4362,13 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
@@ -17066,7 +17621,7 @@ index e99a5234d9ed..958ac107062c 100644
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
-index 56583e764ebf..7c656f8e192f 100644
+index e3944c4b072d..be12d1aac840 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -24,25 +24,10 @@
@@ -17172,10 +17727,11 @@ index 56583e764ebf..7c656f8e192f 100644
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1013,23 +1000,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+@@ -1012,23 +999,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+ }
}
- /*
+-/*
- * Wake up the per-CPU kthread to invoke RCU callbacks.
- */
-static void invoke_rcu_callbacks_kthread(void)
@@ -17192,10 +17748,9 @@ index 56583e764ebf..7c656f8e192f 100644
- local_irq_restore(flags);
-}
-
--/*
+ /*
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
- */
@@ -1083,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
return 0;
}
@@ -17496,7 +18051,7 @@ index 8d0f35debf35..b62cf6400fe0 100644
}
EXPORT_SYMBOL(completion_done);
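
The completion.c hunk above is part of the series' move of completions onto RT-friendly simple waitqueues. As a reference point, here is a userspace analog of the completion primitive itself (mutex plus condition variable; this is an illustration, not the kernel's swait-based implementation):

    #include <pthread.h>
    #include <stdio.h>

    /* A counting completion: each complete() releases one waiter. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        unsigned int done;
    };

    static void init_completion(struct completion *x)
    {
        pthread_mutex_init(&x->lock, NULL);
        pthread_cond_init(&x->wait, NULL);
        x->done = 0;
    }

    static void complete(struct completion *x)
    {
        pthread_mutex_lock(&x->lock);
        x->done++;
        pthread_cond_signal(&x->wait);   /* wake one waiter */
        pthread_mutex_unlock(&x->lock);
    }

    static void wait_for_completion(struct completion *x)
    {
        pthread_mutex_lock(&x->lock);
        while (!x->done)
            pthread_cond_wait(&x->wait, &x->lock);
        x->done--;                       /* consume one completion */
        pthread_mutex_unlock(&x->lock);
    }

    int main(void)
    {
        struct completion done;

        init_completion(&done);
        complete(&done);            /* normally from another thread */
        wait_for_completion(&done); /* returns at once: already counted */
        puts("completed");
        return 0;
    }
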
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 154fd689fe02..10e832da70b6 100644
+index e5066955cc3a..ed1ebcc2ff3d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features =
@@ -17519,7 +18074,25 @@ index 154fd689fe02..10e832da70b6 100644
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
-@@ -449,7 +454,7 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+@@ -425,9 +430,15 @@ static bool set_nr_if_polling(struct task_struct *p)
+ #endif
+ #endif
+
+-void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++void __wake_q_add(struct wake_q_head *head, struct task_struct *task,
++ bool sleeper)
+ {
+- struct wake_q_node *node = &task->wake_q;
++ struct wake_q_node *node;
++
++ if (sleeper)
++ node = &task->wake_q_sleeper;
++ else
++ node = &task->wake_q;
+
+ /*
+ * Atomically grab the task, if ->wake_q is !nil already it means
+@@ -449,24 +460,33 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
head->lastp = &node->next;
}
@@ -17528,7 +18101,24 @@ index 154fd689fe02..10e832da70b6 100644
{
struct wake_q_node *node = head->first;
-@@ -466,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
+ while (node != WAKE_Q_TAIL) {
+ struct task_struct *task;
+
+- task = container_of(node, struct task_struct, wake_q);
++ if (sleeper)
++ task = container_of(node, struct task_struct, wake_q_sleeper);
++ else
++ task = container_of(node, struct task_struct, wake_q);
+ BUG_ON(!task);
+ /* task can safely be re-inserted now */
+ node = node->next;
+- task->wake_q.next = NULL;
++ if (sleeper)
++ task->wake_q_sleeper.next = NULL;
++ else
++ task->wake_q.next = NULL;
+
+ /*
* wake_up_process() implies a wmb() to pair with the queueing
* in wake_q_add() so as not to miss wakeups.
*/
@@ -17540,7 +18130,7 @@ index 154fd689fe02..10e832da70b6 100644
put_task_struct(task);
}
}
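
The hunks above teach wake_q to carry a second, "sleeper" list by parameterizing which intrusive node is used. The underlying structure is a claim-once single-linked wake list: a cmpxchg on the node's next pointer guarantees each task is queued at most once. A compilable userspace sketch (GCC/Clang builtin CAS, invented task type):

    #include <stddef.h>
    #include <stdio.h>

    #define WAKE_Q_TAIL ((struct wake_q_node *)1)

    struct wake_q_node { struct wake_q_node *next; };

    struct task {
        const char *name;
        struct wake_q_node wake_q;  /* next == NULL while not queued */
    };

    struct wake_q_head {
        struct wake_q_node *first;
        struct wake_q_node **lastp;
    };

    #define task_of(n) ((struct task *)((char *)(n) - offsetof(struct task, wake_q)))

    static void wake_q_init(struct wake_q_head *h)
    {
        h->first = WAKE_Q_TAIL;
        h->lastp = &h->first;
    }

    static void wake_q_add(struct wake_q_head *h, struct task *t)
    {
        /* The cmpxchg claims the task: only the first queuer may link it. */
        if (!__sync_bool_compare_and_swap(&t->wake_q.next, NULL, WAKE_Q_TAIL))
            return;
        *h->lastp = &t->wake_q;
        h->lastp = &t->wake_q.next;
    }

    static void wake_up_q(struct wake_q_head *h)
    {
        struct wake_q_node *node = h->first;

        while (node != WAKE_Q_TAIL) {
            struct task *t = task_of(node);

            node = node->next;
            t->wake_q.next = NULL;           /* may be re-queued now */
            printf("waking %s\n", t->name);  /* wake_up_process() stand-in */
        }
        wake_q_init(h);
    }

    int main(void)
    {
        struct task a = { "a", { NULL } }, b = { "b", { NULL } };
        struct wake_q_head q;

        wake_q_init(&q);
        wake_q_add(&q, &a);
        wake_q_add(&q, &b);
        wake_q_add(&q, &a);  /* duplicate add is ignored */
        wake_up_q(&q);
        return 0;
    }
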
-@@ -502,6 +510,38 @@ void resched_curr(struct rq *rq)
+@@ -502,6 +522,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -17579,7 +18169,7 @@ index 154fd689fe02..10e832da70b6 100644
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -525,11 +565,14 @@ void resched_cpu(int cpu)
+@@ -524,11 +576,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -17596,7 +18186,7 @@ index 154fd689fe02..10e832da70b6 100644
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -548,6 +591,8 @@ int get_nohz_timer_target(void)
+@@ -547,6 +602,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
@@ -17605,22 +18195,34 @@ index 154fd689fe02..10e832da70b6 100644
return cpu;
}
/*
-@@ -1100,6 +1145,11 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+@@ -1092,7 +1149,8 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
- lockdep_assert_held(&p->pi_lock);
+-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++static void __do_set_cpus_allowed_tail(struct task_struct *p,
++ const struct cpumask *new_mask)
+ {
+ struct rq *rq = task_rq(p);
+ bool queued, running;
+@@ -1121,6 +1179,98 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ set_curr_task(rq, p);
+ }
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
+ if (__migrate_disabled(p)) {
++ lockdep_assert_held(&p->pi_lock);
++
+ cpumask_copy(&p->cpus_allowed, new_mask);
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
++ p->migrate_disable_update = 1;
++#endif
+ return;
+ }
++ __do_set_cpus_allowed_tail(p, new_mask);
++}
+
- queued = task_on_rq_queued(p);
- running = task_current(rq, p);
-
-@@ -1122,6 +1172,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- set_curr_task(rq, p);
- }
-
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
+static DEFINE_MUTEX(sched_down_mutex);
+static cpumask_t sched_down_cpumask;
@@ -17702,7 +18304,7 @@ index 154fd689fe02..10e832da70b6 100644
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1179,7 +1307,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
+@@ -1179,7 +1329,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -17711,7 +18313,7 @@ index 154fd689fe02..10e832da70b6 100644
goto out;
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
-@@ -1366,6 +1494,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
+@@ -1366,6 +1516,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
return ret;
}
@@ -17730,7 +18332,7 @@ index 154fd689fe02..10e832da70b6 100644
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1410,7 +1550,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -1410,7 +1572,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -17739,7 +18341,7 @@ index 154fd689fe02..10e832da70b6 100644
return 0;
cpu_relax();
}
-@@ -1425,7 +1565,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -1425,7 +1587,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -17749,7 +18351,7 @@ index 154fd689fe02..10e832da70b6 100644
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &rf);
-@@ -1680,10 +1821,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
+@@ -1680,10 +1843,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -17760,7 +18362,7 @@ index 154fd689fe02..10e832da70b6 100644
}
/*
-@@ -2018,8 +2155,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2018,8 +2177,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -17789,10 +18391,11 @@ index 154fd689fe02..10e832da70b6 100644
trace_sched_waking(p);
-@@ -2102,53 +2258,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+@@ -2101,53 +2279,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ return success;
}
- /**
+-/**
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- * @cookie: context's cookie for pinning
@@ -17839,11 +18442,10 @@ index 154fd689fe02..10e832da70b6 100644
- raw_spin_unlock(&p->pi_lock);
-}
-
--/**
+ /**
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
- *
-@@ -2166,6 +2275,18 @@ int wake_up_process(struct task_struct *p)
+@@ -2166,6 +2297,18 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);
@@ -17862,7 +18464,7 @@ index 154fd689fe02..10e832da70b6 100644
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
-@@ -2442,6 +2563,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -2442,6 +2585,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -17872,7 +18474,7 @@ index 154fd689fe02..10e832da70b6 100644
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2770,21 +2894,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+@@ -2770,21 +2916,16 @@ static struct rq *finish_task_switch(struct task_struct *prev)
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -17899,7 +18501,7 @@ index 154fd689fe02..10e832da70b6 100644
put_task_struct(prev);
}
-@@ -3252,6 +3371,77 @@ static inline void schedule_debug(struct task_struct *prev)
+@@ -3252,6 +3393,114 @@ static inline void schedule_debug(struct task_struct *prev)
schedstat_inc(this_rq()->sched_count);
}
@@ -17967,6 +18569,43 @@ index 154fd689fe02..10e832da70b6 100644
+ */
+ p->migrate_disable = 0;
+
++ if (p->migrate_disable_update) {
++ struct rq *rq;
++ struct rq_flags rf;
++
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++
++ __do_set_cpus_allowed_tail(p, &p->cpus_allowed);
++ task_rq_unlock(rq, p, &rf);
++
++ p->migrate_disable_update = 0;
++
++ WARN_ON(smp_processor_id() != task_cpu(p));
++ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_allowed)) {
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ struct migration_arg arg;
++ unsigned int dest_cpu;
++
++ if (p->flags & PF_KTHREAD) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_allowed);
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ unpin_current_cpu();
++ preempt_lazy_enable();
++ preempt_enable();
++ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ return;
++ }
++ }
++
+ unpin_current_cpu();
+ preempt_enable();
+ preempt_lazy_enable();
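
The migrate_enable() rework above records an affinity change that arrives while the task is migration-disabled and applies it only once the pinned section ends. A small sketch of that defer-then-apply pattern, with invented fields (this is the concept, not the scheduler code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Affinity changes are deferred while the pin count is non-zero. */
    struct task {
        int migrate_disable;        /* nesting count; >0 means pinned */
        bool mask_update_pending;   /* change arrived while pinned */
        unsigned long cpus_allowed; /* bitmask of allowed CPUs */
    };

    static void apply_affinity(struct task *t)
    {
        printf("applying mask 0x%lx (may migrate now)\n", t->cpus_allowed);
    }

    static void set_cpus_allowed(struct task *t, unsigned long mask)
    {
        t->cpus_allowed = mask;
        if (t->migrate_disable) {
            t->mask_update_pending = true;  /* defer: task must stay put */
            return;
        }
        apply_affinity(t);
    }

    static void migrate_disable(struct task *t) { t->migrate_disable++; }

    static void migrate_enable(struct task *t)
    {
        if (--t->migrate_disable)
            return;                         /* still nested */
        if (t->mask_update_pending) {
            t->mask_update_pending = false;
            apply_affinity(t);              /* catch up on deferred change */
        }
    }

    int main(void)
    {
        struct task t = { 0, false, 0xf };

        migrate_disable(&t);
        set_cpus_allowed(&t, 0x2);  /* deferred inside the pinned section */
        migrate_enable(&t);         /* applied here */
        return 0;
    }
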
@@ -17977,7 +18616,7 @@ index 154fd689fe02..10e832da70b6 100644
/*
* Pick up the highest-prio task:
*/
-@@ -3368,19 +3558,6 @@ static void __sched notrace __schedule(bool preempt)
+@@ -3368,19 +3617,6 @@ static void __sched notrace __schedule(bool preempt)
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -17997,7 +18636,7 @@ index 154fd689fe02..10e832da70b6 100644
}
switch_count = &prev->nvcsw;
}
-@@ -3390,6 +3567,7 @@ static void __sched notrace __schedule(bool preempt)
+@@ -3390,6 +3626,7 @@ static void __sched notrace __schedule(bool preempt)
next = pick_next_task(rq, prev, cookie);
clear_tsk_need_resched(prev);
@@ -18005,14 +18644,14 @@ index 154fd689fe02..10e832da70b6 100644
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3437,9 +3615,20 @@ void __noreturn do_task_dead(void)
+@@ -3437,8 +3674,19 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
- if (!tsk->state || tsk_is_pi_blocked(tsk))
+ if (!tsk->state)
return;
- /*
++ /*
+ * If a worker went to sleep, notify and ask workqueue whether
+ * it wants to wake up a task to maintain concurrency.
+ */
@@ -18023,11 +18662,10 @@ index 154fd689fe02..10e832da70b6 100644
+ if (tsk_is_pi_blocked(tsk))
+ return;
+
-+ /*
+ /*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
- */
-@@ -3447,6 +3636,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
+@@ -3447,6 +3695,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
@@ -18040,7 +18678,7 @@ index 154fd689fe02..10e832da70b6 100644
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3457,6 +3652,7 @@ asmlinkage __visible void __sched schedule(void)
+@@ -3457,6 +3711,7 @@ asmlinkage __visible void __sched schedule(void)
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
@@ -18048,7 +18686,7 @@ index 154fd689fe02..10e832da70b6 100644
}
EXPORT_SYMBOL(schedule);
-@@ -3520,6 +3716,30 @@ static void __sched notrace preempt_schedule_common(void)
+@@ -3520,6 +3775,30 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched());
}
@@ -18079,7 +18717,7 @@ index 154fd689fe02..10e832da70b6 100644
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3534,7 +3754,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+@@ -3534,7 +3813,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
*/
if (likely(!preemptible()))
return;
@@ -18089,7 +18727,7 @@ index 154fd689fe02..10e832da70b6 100644
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3561,6 +3782,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -3561,6 +3841,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
if (likely(!preemptible()))
return;
@@ -18099,7 +18737,7 @@ index 154fd689fe02..10e832da70b6 100644
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -3583,7 +3807,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+@@ -3583,7 +3866,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -18116,7 +18754,7 @@ index 154fd689fe02..10e832da70b6 100644
exception_exit(prev_ctx);
preempt_latency_stop(1);
-@@ -3629,10 +3862,25 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3629,10 +3921,25 @@ EXPORT_SYMBOL(default_wake_function);
#ifdef CONFIG_RT_MUTEXES
@@ -18144,7 +18782,7 @@ index 154fd689fe02..10e832da70b6 100644
*
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
-@@ -3640,16 +3888,40 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3640,16 +3947,40 @@ EXPORT_SYMBOL(default_wake_function);
* Used by the rt_mutex code to implement priority inheritance
* logic. Call site only calls if the priority of the task changed.
*/
@@ -18188,7 +18826,7 @@ index 154fd689fe02..10e832da70b6 100644
/*
* Idle task boosting is a nono in general. There is one
-@@ -3669,7 +3941,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -3669,7 +4000,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
goto out_unlock;
}
@@ -18197,7 +18835,7 @@ index 154fd689fe02..10e832da70b6 100644
oldprio = p->prio;
if (oldprio == prio)
-@@ -3693,7 +3965,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -3693,7 +4024,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
* running task
*/
if (dl_prio(prio)) {
@@ -18205,7 +18843,7 @@ index 154fd689fe02..10e832da70b6 100644
if (!dl_prio(p->normal_prio) ||
(pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
p->dl.dl_boosted = 1;
-@@ -3730,6 +4001,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
+@@ -3730,6 +4060,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
balance_callback(rq);
preempt_enable();
}
@@ -18217,7 +18855,7 @@ index 154fd689fe02..10e832da70b6 100644
#endif
void set_user_nice(struct task_struct *p, long nice)
-@@ -3974,10 +4250,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
+@@ -3974,10 +4309,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
* Keep a potential priority boosting if called from
* sched_setscheduler().
*/
@@ -18230,7 +18868,7 @@ index 154fd689fe02..10e832da70b6 100644
if (dl_prio(p->prio))
p->sched_class = &dl_sched_class;
-@@ -4264,7 +4539,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -4264,7 +4598,7 @@ static int __sched_setscheduler(struct task_struct *p,
* the runqueue. This will be done when the task deboost
* itself.
*/
@@ -18239,7 +18877,7 @@ index 154fd689fe02..10e832da70b6 100644
if (new_effective_prio == oldprio)
queue_flags &= ~DEQUEUE_MOVE;
}
-@@ -4939,6 +5214,7 @@ int __cond_resched_lock(spinlock_t *lock)
+@@ -4939,6 +5273,7 @@ int __cond_resched_lock(spinlock_t *lock)
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -18247,7 +18885,7 @@ index 154fd689fe02..10e832da70b6 100644
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4952,6 +5228,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4952,6 +5287,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
@@ -18255,7 +18893,7 @@ index 154fd689fe02..10e832da70b6 100644
/**
* yield - yield the current processor to other threads.
-@@ -5315,7 +5592,9 @@ void init_idle(struct task_struct *idle, int cpu)
+@@ -5315,7 +5651,9 @@ void init_idle(struct task_struct *idle, int cpu)
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -18266,7 +18904,7 @@ index 154fd689fe02..10e832da70b6 100644
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -5458,6 +5737,8 @@ void sched_setnuma(struct task_struct *p, int nid)
+@@ -5458,6 +5796,8 @@ void sched_setnuma(struct task_struct *p, int nid)
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -18275,8 +18913,8 @@ index 154fd689fe02..10e832da70b6 100644
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5472,7 +5753,12 @@ void idle_task_exit(void)
- switch_mm_irqs_off(mm, &init_mm, current);
+@@ -5472,7 +5812,12 @@ void idle_task_exit(void)
+ switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
- mmdrop(mm);
@@ -18289,7 +18927,15 @@ index 154fd689fe02..10e832da70b6 100644
}
/*
-@@ -7418,6 +7704,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5881,6 +6226,7 @@ static int init_rootdomain(struct root_domain *rd)
+ rd->rto_cpu = -1;
+ raw_spin_lock_init(&rd->rto_lock);
+ init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
++ rd->rto_push_work.flags |= IRQ_WORK_HARD_IRQ;
+ #endif
+
+ init_dl_bw(&rd->dl_bw);
+@@ -7439,6 +7785,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
@@ -18300,7 +18946,7 @@ index 154fd689fe02..10e832da70b6 100644
return 0;
}
#endif
-@@ -7698,7 +7988,7 @@ void __init sched_init(void)
+@@ -7700,7 +8050,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
@@ -18310,17 +18956,17 @@ index 154fd689fe02..10e832da70b6 100644
return (nested == preempt_offset);
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
-index c95c5122b105..e00accf92a4b 100644
+index df5c32a0c6ed..c77fd444dc3c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
+@@ -693,6 +693,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
+ timer->irqsafe = 1;
}
- static
+ /*
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fa178b62ea79..935224123441 100644
--- a/kernel/sched/debug.c
@@ -18347,7 +18993,7 @@ index fa178b62ea79..935224123441 100644
#undef PN
#undef __PN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index c242944f5cbd..4aeb2e2e41bc 100644
+index 3d862f5b0331..c6db32c0c557 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
@@ -18395,7 +19041,7 @@ index c242944f5cbd..4aeb2e2e41bc 100644
return;
}
hrtick_start(rq, delta);
-@@ -5905,7 +5905,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -5862,7 +5862,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
preempt:
@@ -18404,7 +19050,7 @@ index c242944f5cbd..4aeb2e2e41bc 100644
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -8631,7 +8631,7 @@ static void task_fork_fair(struct task_struct *p)
+@@ -8588,7 +8588,7 @@ static void task_fork_fair(struct task_struct *p)
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -18413,7 +19059,7 @@ index c242944f5cbd..4aeb2e2e41bc 100644
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -8655,7 +8655,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+@@ -8612,7 +8612,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -18423,7 +19069,7 @@ index c242944f5cbd..4aeb2e2e41bc 100644
check_preempt_curr(rq, p, 0);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index 69631fa46c2f..6d28fcd08872 100644
+index 1b3c8189b286..36086f74e011 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -45,11 +45,19 @@ SCHED_FEAT(LB_BIAS, true)
@@ -18444,10 +19090,10 @@ index 69631fa46c2f..6d28fcd08872 100644
SCHED_FEAT(TTWU_QUEUE, true)
+#endif
- #ifdef HAVE_RT_PUSH_IPI
/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
-index f139f22ce30d..b0691f4e7d49 100644
+index 7a360d6f6798..d361629c0f96 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
@@ -18458,19 +19104,11 @@ index f139f22ce30d..b0691f4e7d49 100644
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
-@@ -101,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
- rt_rq->push_cpu = nr_cpu_ids;
- raw_spin_lock_init(&rt_rq->push_lock);
- init_irq_work(&rt_rq->push_work, push_irq_work_func);
-+ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ;
- #endif
- #endif /* CONFIG_SMP */
- /* We start is dequeued state, because no RT tasks are queued */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 055f935d4421..19324ac27026 100644
+index cff985feb6e7..280c7d5a7657 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1163,6 +1163,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+@@ -1162,6 +1162,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
@@ -18478,7 +19116,7 @@ index 055f935d4421..19324ac27026 100644
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
-@@ -1346,6 +1347,15 @@ extern void init_sched_fair_class(void);
+@@ -1345,6 +1346,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -18719,7 +19357,7 @@ index 000000000000..1950f40ca725
+}
+EXPORT_SYMBOL_GPL(swork_put);
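
kernel/sched/swork.c is a new file in this patch but almost entirely elided by the outer diff; only its tail is visible above. The general shape is a single server thread draining a queue of work items. A pthread sketch of that shape, with invented names rather than the swork API:

    #include <pthread.h>
    #include <stdio.h>

    struct swork_event {
        struct swork_event *next;
        void (*func)(struct swork_event *);
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static struct swork_event *head;
    static int stop;

    static void *swork_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (!head && !stop)
                pthread_cond_wait(&cond, &lock);
            if (!head && stop)
                break;
            struct swork_event *ev = head;
            head = ev->next;
            pthread_mutex_unlock(&lock);
            ev->func(ev);            /* run outside the queue lock */
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void swork_queue(struct swork_event *ev)
    {
        pthread_mutex_lock(&lock);
        ev->next = head;             /* LIFO push; ordering is not the point */
        head = ev;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    static void hello(struct swork_event *ev) { (void)ev; puts("swork ran"); }

    int main(void)
    {
        pthread_t t;
        struct swork_event ev = { 0, hello };

        pthread_create(&t, NULL, swork_thread, NULL);
        swork_queue(&ev);
        pthread_mutex_lock(&lock);
        stop = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }
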
diff --git a/kernel/signal.c b/kernel/signal.c
-index 0b1415720a15..c884647951f7 100644
+index 7ebe236a5364..4d094ae3a625 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -14,6 +14,7 @@
@@ -18730,7 +19368,7 @@ index 0b1415720a15..c884647951f7 100644
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-@@ -352,13 +353,30 @@ static bool task_participate_group_stop(struct task_struct *task)
+@@ -354,13 +355,30 @@ static bool task_participate_group_stop(struct task_struct *task)
return false;
}
@@ -18762,7 +19400,7 @@ index 0b1415720a15..c884647951f7 100644
{
struct sigqueue *q = NULL;
struct user_struct *user;
-@@ -375,7 +393,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+@@ -377,7 +395,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -18774,7 +19412,7 @@ index 0b1415720a15..c884647951f7 100644
} else {
print_dropped_signal(sig);
}
-@@ -392,6 +413,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+@@ -394,6 +415,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
return q;
}
@@ -18788,7 +19426,7 @@ index 0b1415720a15..c884647951f7 100644
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
-@@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqueue *q)
+@@ -403,6 +431,21 @@ static void __sigqueue_free(struct sigqueue *q)
kmem_cache_free(sigqueue_cachep, q);
}
@@ -18810,10 +19448,11 @@ index 0b1415720a15..c884647951f7 100644
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
-@@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *queue)
+@@ -415,6 +458,21 @@ void flush_sigqueue(struct sigpending *queue)
+ }
}
- /*
++/*
+ * Called from __exit_signal. Flush tsk->pending and
+ * tsk->sigqueue_cache
+ */
@@ -18828,21 +19467,20 @@ index 0b1415720a15..c884647951f7 100644
+ kmem_cache_free(sigqueue_cachep, q);
+}
+
-+/*
+ /*
* Flush all pending signals for this kthread.
*/
- void flush_signals(struct task_struct *t)
-@@ -525,7 +583,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
- still_pending:
- list_del_init(&first->list);
- copy_siginfo(info, &first->info);
+@@ -534,7 +592,7 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
- __sigqueue_free(first);
+ sigqueue_free_current(first);
} else {
/*
* Ok, it wasn't in the queue. This must be
-@@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
- {
+@@ -570,6 +628,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+ bool resched_timer = false;
int signr;
+ WARN_ON_ONCE(tsk != current);
@@ -18850,7 +19488,7 @@ index 0b1415720a15..c884647951f7 100644
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1156,8 +1216,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
+@@ -1166,8 +1226,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -18861,7 +19499,7 @@ index 0b1415720a15..c884647951f7 100644
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1182,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+@@ -1192,6 +1252,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
return ret;
}
@@ -18901,7 +19539,7 @@ index 0b1415720a15..c884647951f7 100644
/*
* Nuke all other threads in the group.
*/
-@@ -1216,12 +1309,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+@@ -1226,12 +1319,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
* Disable interrupts early to avoid deadlocks.
* See rcu_read_unlock() comment header for details.
*/
@@ -18916,7 +19554,7 @@ index 0b1415720a15..c884647951f7 100644
break;
}
/*
-@@ -1242,7 +1335,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+@@ -1252,7 +1345,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
@@ -18925,7 +19563,7 @@ index 0b1415720a15..c884647951f7 100644
}
return sighand;
-@@ -1485,7 +1578,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1495,7 +1588,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
@@ -18935,7 +19573,7 @@ index 0b1415720a15..c884647951f7 100644
if (q)
q->flags |= SIGQUEUE_PREALLOC;
-@@ -1846,15 +1940,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+@@ -1856,15 +1950,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
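
The signal.c changes above add a one-entry per-task sigqueue cache so the delivery hot path can usually avoid the allocator. A compact userspace analog of a one-slot per-thread cache (the `__thread` storage and names are illustrative, not the kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct sigqueue { int sig; struct sigqueue *next; };

    /* One cached entry per thread; empty slot is NULL. */
    static __thread struct sigqueue *sigqueue_cache;

    static struct sigqueue *sigqueue_alloc_cached(void)
    {
        struct sigqueue *q = sigqueue_cache;

        if (q) {                      /* fast path: reuse the cached entry */
            sigqueue_cache = NULL;
            return q;
        }
        return malloc(sizeof(*q));    /* slow path: hit the allocator */
    }

    static void sigqueue_free_cached(struct sigqueue *q)
    {
        if (!sigqueue_cache)
            sigqueue_cache = q;       /* refill the empty slot... */
        else
            free(q);                  /* ...or really free it */
    }

    int main(void)
    {
        struct sigqueue *q = sigqueue_alloc_cached();

        q->sig = 10;
        sigqueue_free_cached(q);      /* lands in the cache */
        struct sigqueue *r = sigqueue_alloc_cached();
        printf("reused: %s\n", r == q ? "yes" : "no");  /* yes */
        free(r);
        return 0;
    }
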
@@ -19213,10 +19851,11 @@ index 744fa611cae0..819bd7cf5ad0 100644
pending = local_softirq_pending();
if (pending) {
-@@ -331,6 +471,309 @@ asmlinkage __visible void do_softirq(void)
+@@ -330,6 +470,309 @@ asmlinkage __visible void do_softirq(void)
+ local_irq_restore(flags);
}
- /*
++/*
+ * This function must run with irqs disabled!
+ */
+void raise_softirq_irqoff(unsigned int nr)
@@ -19519,10 +20158,9 @@ index 744fa611cae0..819bd7cf5ad0 100644
+}
+
+#endif /* PREEMPT_RT_FULL */
-+/*
+ /*
* Enter an interrupt context.
*/
- void irq_enter(void)
@@ -341,9 +784,9 @@ void irq_enter(void)
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
@@ -19805,12 +20443,11 @@ index 744fa611cae0..819bd7cf5ad0 100644
list = __this_cpu_read(tasklet_hi_vec.head);
__this_cpu_write(tasklet_hi_vec.head, NULL);
__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
-+
- local_irq_enable();
-
+- local_irq_enable();
+-
- while (list) {
- struct tasklet_struct *t = list;
--
+
- list = list->next;
-
- if (tasklet_trylock(t)) {
@@ -19824,7 +20461,8 @@ index 744fa611cae0..819bd7cf5ad0 100644
- }
- tasklet_unlock(t);
- }
--
++ local_irq_enable();
+
- local_irq_disable();
- t->next = NULL;
- *__this_cpu_read(tasklet_hi_vec.tail) = t;
@@ -19849,24 +20487,9 @@ index 744fa611cae0..819bd7cf5ad0 100644
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
+-static int ksoftirqd_should_run(unsigned int cpu)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+void tasklet_unlock_wait(struct tasklet_struct *t)
-+{
-+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
-+ /*
-+ * Hack for now to avoid this busy-loop:
-+ */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ msleep(1);
-+#else
-+ barrier();
-+#endif
-+ }
-+}
-+EXPORT_SYMBOL(tasklet_unlock_wait);
-+#endif
-+
- static int ksoftirqd_should_run(unsigned int cpu)
{
- return local_softirq_pending();
-}
@@ -19875,16 +20498,29 @@ index 744fa611cae0..819bd7cf5ad0 100644
-{
- local_irq_disable();
- if (local_softirq_pending()) {
-- /*
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ /*
- * We can safely run softirq on inline stack, as we are not deep
- * in the task stack here.
-- */
++ * Hack for now to avoid this busy-loop:
+ */
- __do_softirq();
- local_irq_enable();
- cond_resched_rcu_qs();
- return;
-- }
++#ifdef CONFIG_PREEMPT_RT_FULL
++ msleep(1);
++#else
++ barrier();
++#endif
+ }
- local_irq_enable();
++}
++EXPORT_SYMBOL(tasklet_unlock_wait);
++#endif
++
++static int ksoftirqd_should_run(unsigned int cpu)
++{
+ return ksoftirqd_softirq_pending();
}
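
The softirq.c rework above keeps the same basic dispatch model even as handlers move into thread context on RT: pending vectors are a bitmask, raising sets a bit, and the service loop drains bits lowest-first. A minimal sketch of that bitmask loop (vector names invented):

    #include <stdio.h>

    enum { TIMER_SOFTIRQ, NET_RX_SOFTIRQ, TASKLET_SOFTIRQ, NR_SOFTIRQS };

    static const char *const names[] = { "timer", "net_rx", "tasklet" };
    static unsigned int pending;

    static void raise_softirq(unsigned int nr) { pending |= 1u << nr; }

    static void do_softirq(void)
    {
        unsigned int bits = pending;

        pending = 0;                   /* new raises start a fresh round */
        while (bits) {
            unsigned int nr = __builtin_ctz(bits);  /* lowest set bit */

            bits &= bits - 1;
            printf("running %s softirq\n", names[nr]);
        }
    }

    int main(void)
    {
        raise_softirq(NET_RX_SOFTIRQ);
        raise_softirq(TIMER_SOFTIRQ);
        do_softirq();                  /* timer first, then net_rx */
        return 0;
    }
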
@@ -20021,7 +20657,7 @@ index ec9ab2f01489..8b89dbedeaff 100644
}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
-index bb5ec425dfe0..8338b14ed3a3 100644
+index eeb7f2f5698d..369203af6406 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -53,6 +53,7 @@
@@ -20032,7 +20668,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
#include "tick-internal.h"
-@@ -695,6 +696,29 @@ static void hrtimer_switch_to_hres(void)
+@@ -693,6 +694,29 @@ static void hrtimer_switch_to_hres(void)
retrigger_next_event(NULL);
}
@@ -20062,7 +20698,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
static void clock_was_set_work(struct work_struct *work)
{
clock_was_set();
-@@ -710,6 +734,7 @@ void clock_was_set_delayed(void)
+@@ -708,6 +732,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
@@ -20070,7 +20706,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
#else
-@@ -719,11 +744,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
+@@ -717,11 +742,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
@@ -20084,7 +20720,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
-@@ -855,6 +877,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+@@ -853,6 +875,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -20117,7 +20753,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -896,6 +944,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
+@@ -894,6 +942,11 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -20129,7 +20765,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
-@@ -991,7 +1044,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+@@ -989,7 +1042,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
timer_stats_hrtimer_set_start_info(timer);
@@ -20146,7 +20782,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
-@@ -1063,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer)
+@@ -1061,7 +1123,7 @@ int hrtimer_cancel(struct hrtimer *timer)
if (ret >= 0)
return ret;
@@ -20155,7 +20791,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1127,6 +1189,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+@@ -1137,6 +1199,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -20163,7 +20799,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1167,6 +1230,7 @@ bool hrtimer_active(const struct hrtimer *timer)
+@@ -1177,6 +1240,7 @@ bool hrtimer_active(const struct hrtimer *timer)
seq = raw_read_seqcount_begin(&cpu_base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -20171,10 +20807,11 @@ index bb5ec425dfe0..8338b14ed3a3 100644
cpu_base->running == timer)
return true;
-@@ -1265,10 +1329,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
+@@ -1275,10 +1339,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
cpu_base->running = NULL;
}
+-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
@@ -20276,7 +20913,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
+
+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
- static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
++static int __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
unsigned int active = cpu_base->active_bases;
@@ -20284,7 +20921,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
for (; active; base++, active >>= 1) {
struct timerqueue_node *node;
-@@ -1284,6 +1450,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+@@ -1294,6 +1460,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
timer = container_of(node, struct hrtimer, node);
@@ -20300,7 +20937,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
-@@ -1299,9 +1474,14 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+@@ -1309,9 +1484,13 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
@@ -20311,12 +20948,57 @@ index bb5ec425dfe0..8338b14ed3a3 100644
+ raise = 1;
}
}
++ return raise;
+ }
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+@@ -1325,6 +1504,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ ktime_t expires_next, now, entry_time, delta;
+ int retries = 0;
++ int raise;
+
+ BUG_ON(!cpu_base->hres_active);
+ cpu_base->nr_events++;
+@@ -1343,7 +1523,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ */
+ cpu_base->expires_next.tv64 = KTIME_MAX;
+
+- __hrtimer_run_queues(cpu_base, now);
++ raise = __hrtimer_run_queues(cpu_base, now);
+
+ /* Reevaluate the clock bases for the next expiry */
+ expires_next = __hrtimer_get_next_event(cpu_base);
+@@ -1354,6 +1534,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
+ cpu_base->expires_next = expires_next;
+ cpu_base->in_hrtirq = 0;
+ raw_spin_unlock(&cpu_base->lock);
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+
+ /* Reprogramming necessary ? */
+ if (!tick_program_event(expires_next, 0)) {
+@@ -1433,6 +1615,7 @@ void hrtimer_run_queues(void)
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ ktime_t now;
++ int raise;
+
+ if (__hrtimer_hres_active(cpu_base))
+ return;
+@@ -1451,8 +1634,10 @@ void hrtimer_run_queues(void)
+
+ raw_spin_lock(&cpu_base->lock);
+ now = hrtimer_update_base(cpu_base);
+- __hrtimer_run_queues(cpu_base, now);
++ raise = __hrtimer_run_queues(cpu_base, now);
+ raw_spin_unlock(&cpu_base->lock);
+ if (raise)
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
- #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1464,16 +1644,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
+ /*
+@@ -1474,16 +1659,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -20337,7 +21019,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
-@@ -1515,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
+@@ -1525,7 +1712,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -20347,7 +21029,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1532,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
+@@ -1542,8 +1730,10 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
return ret;
}
@@ -20360,7 +21042,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1546,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+@@ -1556,7 +1746,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -20369,7 +21051,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1573,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+@@ -1583,6 +1773,12 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
return ret;
}
@@ -20382,7 +21064,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1587,6 +1778,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+@@ -1597,6 +1793,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
@@ -20409,7 +21091,7 @@ index bb5ec425dfe0..8338b14ed3a3 100644
/*
* Functions related to boot-time initialization:
*/
-@@ -1598,10 +1809,14 @@ int hrtimers_prepare_cpu(unsigned int cpu)
+@@ -1608,16 +1824,20 @@ int hrtimers_prepare_cpu(unsigned int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -20424,7 +21106,56 @@ index bb5ec425dfe0..8338b14ed3a3 100644
return 0;
}
-@@ -1671,9 +1886,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ #ifdef CONFIG_HOTPLUG_CPU
+
+-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
++static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base)
+ {
+ struct hrtimer *timer;
+@@ -1645,12 +1865,21 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ */
+ enqueue_hrtimer(timer, new_base);
+ }
++#ifdef CONFIG_PREEMPT_RT_BASE
++ list_splice_tail(&old_base->expired, &new_base->expired);
++ /*
++ * Tell the caller to raise HRTIMER_SOFTIRQ. We can't safely
++ * acquire ktimersoftd->pi_lock while the base lock is held.
++ */
++ return !list_empty(&new_base->expired);
++#endif
++ return 0;
+ }
+
+ int hrtimers_dead_cpu(unsigned int scpu)
+ {
+ struct hrtimer_cpu_base *old_base, *new_base;
+- int i;
++ int i, raise = 0;
+
+ BUG_ON(cpu_online(scpu));
+ tick_cancel_sched_timer(scpu);
+@@ -1666,13 +1895,16 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+- migrate_hrtimer_list(&old_base->clock_base[i],
+- &new_base->clock_base[i]);
++ raise |= migrate_hrtimer_list(&old_base->clock_base[i],
++ &new_base->clock_base[i]);
+ }
+
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
+
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++
+ /* Check, if we got expired work to do */
+ __hrtimer_peek_ahead_timers();
+ local_irq_enable();
+@@ -1681,9 +1913,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
#endif /* CONFIG_HOTPLUG_CPU */
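
The hrtimer hunks above follow one pattern throughout: timers marked irqsafe still expire in hard-irq context, everything else is moved to a per-base expired list, and the caller is told to raise HRTIMER_SOFTIRQ so a thread drains that list. A sketch of the partition-and-defer idea, with invented types:

    #include <stdio.h>

    struct timer {
        const char *name;
        int irqsafe;
        struct timer *next;
    };

    static struct timer *expired;  /* deferred list, drained in thread context */

    /* Returns nonzero when the caller should raise the softirq. */
    static int expire(struct timer *t)
    {
        if (t->irqsafe) {
            printf("irq context: %s\n", t->name);
            return 0;
        }
        t->next = expired;         /* defer to thread context */
        expired = t;
        return 1;
    }

    static void run_softirq(void)
    {
        while (expired) {
            struct timer *t = expired;

            expired = t->next;
            printf("softirq context: %s\n", t->name);
        }
    }

    int main(void)
    {
        struct timer a = { "sched_tick", 1, 0 }, b = { "posix_timer", 0, 0 };
        int raise = 0;

        raise |= expire(&a);
        raise |= expire(&b);
        if (raise)
            run_softirq();  /* raise_softirq_irqoff(HRTIMER_SOFTIRQ) stand-in */
        return 0;
    }
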
@@ -20858,16 +21589,15 @@ index f2826c35e918..464a98155a0e 100644
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
-- if (timer_delete_hook(timer) == TIMER_RETRY) {
+ /* On RT we can race with a deletion */
+ if (!timer->it_signal) {
- unlock_timer(timer, flags);
++ unlock_timer(timer, flags);
+ return;
+ }
+
-+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ rcu_read_lock();
-+ unlock_timer(timer, flags);
+ unlock_timer(timer, flags);
+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
+ timer);
+ rcu_read_unlock();
@@ -20920,7 +21650,7 @@ index 4fcd99e12aa0..5a47f2e98faf 100644
clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
-index 3bcb61b52f6c..66d85482a96e 100644
+index dae1a45be504..c573b1a848b6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now)
@@ -20973,7 +21703,7 @@ index 3bcb61b52f6c..66d85482a96e 100644
};
/*
-@@ -673,10 +679,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+@@ -678,10 +684,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -20985,8 +21715,8 @@ index 3bcb61b52f6c..66d85482a96e 100644
+ } while (read_seqcount_retry(&jiffies_seq, seq));
ts->last_jiffies = basejiff;
- if (rcu_needs_cpu(basemono, &next_rcu) ||
-@@ -877,14 +883,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ /*
+@@ -892,14 +898,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -21002,7 +21732,7 @@ index 3bcb61b52f6c..66d85482a96e 100644
return false;
}
-@@ -1193,6 +1192,7 @@ void tick_setup_sched_timer(void)
+@@ -1208,6 +1207,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -21011,10 +21741,10 @@ index 3bcb61b52f6c..66d85482a96e 100644
/* Get the next period (per-CPU) */
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 46e312e9be38..fa75cf5d9253 100644
+index d831827d7ab0..76d982c11ac3 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2328,8 +2328,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2348,8 +2348,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
@@ -21042,7 +21772,7 @@ index 704f595ce83f..763a3e5121ff 100644
#define CS_NAME_LEN 32
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
-index c611c47de884..08a5ab762495 100644
+index e872f7f05e8a..8e75e7442aaa 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64);
@@ -21058,7 +21788,7 @@ index c611c47de884..08a5ab762495 100644
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
-@@ -948,10 +951,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
+@@ -953,10 +956,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
@@ -21071,7 +21801,7 @@ index c611c47de884..08a5ab762495 100644
}
cpu_relax();
}
-@@ -1023,9 +1026,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+@@ -1033,9 +1036,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
/* See the comment in lock_timer_base() */
timer->flags |= TIMER_MIGRATING;
@@ -21082,8 +21812,8 @@ index c611c47de884..08a5ab762495 100644
+ raw_spin_lock(&base->lock);
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
- }
-@@ -1050,7 +1053,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+ forward_timer_base(base);
+@@ -1060,7 +1063,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
}
out_unlock:
@@ -21092,7 +21822,7 @@ index c611c47de884..08a5ab762495 100644
return ret;
}
-@@ -1144,19 +1147,46 @@ void add_timer_on(struct timer_list *timer, int cpu)
+@@ -1154,9 +1157,9 @@ void add_timer_on(struct timer_list *timer, int cpu)
if (base != new_base) {
timer->flags |= TIMER_MIGRATING;
@@ -21104,6 +21834,7 @@ index c611c47de884..08a5ab762495 100644
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | cpu);
}
+@@ -1164,10 +1167,37 @@ void add_timer_on(struct timer_list *timer, int cpu)
debug_activate(timer, timer->expires);
internal_add_timer(base, timer);
@@ -21142,7 +21873,7 @@ index c611c47de884..08a5ab762495 100644
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -1180,7 +1210,7 @@ int del_timer(struct timer_list *timer)
+@@ -1191,7 +1221,7 @@ int del_timer(struct timer_list *timer)
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, true);
@@ -21151,7 +21882,7 @@ index c611c47de884..08a5ab762495 100644
}
return ret;
-@@ -1208,13 +1238,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
+@@ -1219,13 +1249,13 @@ int try_to_del_timer_sync(struct timer_list *timer)
timer_stats_timer_clear_start_info(timer);
ret = detach_if_pending(timer, base, true);
}
@@ -21167,7 +21898,7 @@ index c611c47de884..08a5ab762495 100644
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
-@@ -1274,7 +1304,7 @@ int del_timer_sync(struct timer_list *timer)
+@@ -1285,7 +1315,7 @@ int del_timer_sync(struct timer_list *timer)
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -21176,7 +21907,7 @@ index c611c47de884..08a5ab762495 100644
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1339,14 +1369,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
+@@ -1350,14 +1380,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
fn = timer->function;
data = timer->data;
@@ -21199,7 +21930,7 @@ index c611c47de884..08a5ab762495 100644
}
}
}
-@@ -1515,7 +1548,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+@@ -1526,7 +1559,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (cpu_is_offline(smp_processor_id()))
return expires;
@@ -21208,16 +21939,16 @@ index c611c47de884..08a5ab762495 100644
nextevt = __next_timer_interrupt(base);
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
-@@ -1543,7 +1576,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
- if ((expires - basem) > TICK_NSEC)
+@@ -1560,7 +1593,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
base->is_idle = true;
+ }
}
- spin_unlock(&base->lock);
+ raw_spin_unlock(&base->lock);
return cmp_next_hrtimer_event(basem, expires);
}
-@@ -1608,13 +1641,13 @@ void update_process_times(int user_tick)
+@@ -1625,13 +1658,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
@@ -21233,7 +21964,7 @@ index c611c47de884..08a5ab762495 100644
run_posix_cpu_timers(p);
}
-@@ -1630,7 +1663,7 @@ static inline void __run_timers(struct timer_base *base)
+@@ -1647,7 +1680,7 @@ static inline void __run_timers(struct timer_base *base)
if (!time_after_eq(jiffies, base->clk))
return;
@@ -21242,7 +21973,7 @@ index c611c47de884..08a5ab762495 100644
while (time_after_eq(jiffies, base->clk)) {
-@@ -1640,8 +1673,8 @@ static inline void __run_timers(struct timer_base *base)
+@@ -1657,8 +1690,8 @@ static inline void __run_timers(struct timer_base *base)
while (levels--)
expire_timers(base, heads + levels);
}
@@ -21253,16 +21984,16 @@ index c611c47de884..08a5ab762495 100644
}
/*
-@@ -1651,6 +1684,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
- {
- struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+@@ -1681,6 +1714,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+ */
+ base->must_forward_clk = false;
+ irq_work_tick_soft();
+
__run_timers(base);
- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
-@@ -1836,16 +1871,16 @@ int timers_dead_cpu(unsigned int cpu)
+@@ -1881,16 +1916,16 @@ int timers_dead_cpu(unsigned int cpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
@@ -21283,7 +22014,7 @@ index c611c47de884..08a5ab762495 100644
put_cpu_ptr(&timer_bases);
}
return 0;
-@@ -1861,8 +1896,11 @@ static void __init init_timer_cpu(int cpu)
+@@ -1906,8 +1941,11 @@ static void __init init_timer_cpu(int cpu)
for (i = 0; i < NR_BASES; i++) {
base = per_cpu_ptr(&timer_bases[i], cpu);
base->cpu = cpu;
@@ -22625,7 +23356,7 @@ index 000000000000..7f6ee70dea41
+
+device_initcall(latency_hist_init);
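
kernel/trace/latency_hist.c is a large new file whose body is elided by the outer diff; only its initcall tail survives above. Its core idea is bucketing latency samples into logarithmic bins. A toy histogram in that spirit (bucket layout invented, not the file's actual format):

    #include <stdio.h>

    #define NBUCKETS 16

    static unsigned long hist[NBUCKETS];

    static void record_latency(unsigned long usecs)
    {
        int b = 0;

        while (usecs > 1 && b < NBUCKETS - 1) {  /* log2 bucket index */
            usecs >>= 1;
            b++;
        }
        hist[b]++;
    }

    int main(void)
    {
        unsigned long samples[] = { 3, 7, 120, 4096, 5 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            record_latency(samples[i]);
        for (int b = 0; b < NBUCKETS; b++)
            if (hist[b])
                printf("bucket 2^%d us: %lu\n", b, hist[b]);
        return 0;
    }
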
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 83c60f9013cb..6fb207964a84 100644
+index 15b02645ce8b..00d9ebcf42e2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
@@ -22639,7 +23370,7 @@ index 83c60f9013cb..6fb207964a84 100644
@@ -1907,8 +1908,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
@@ -22649,7 +23380,7 @@ index 83c60f9013cb..6fb207964a84 100644
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
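
The trace.c hunk above adds a NEED_RESCHED_LAZY bit next to the existing per-event flag bits. The underlying technique is simply packing several boolean conditions into one flags byte. A sketch of that packing (flag values chosen for illustration, not the kernel's TRACE_FLAG_* encoding):

    #include <stdio.h>

    enum {
        TRACE_FLAG_IRQS_OFF          = 1 << 0,
        TRACE_FLAG_NEED_RESCHED      = 1 << 1,
        TRACE_FLAG_NEED_RESCHED_LAZY = 1 << 2,
        TRACE_FLAG_HARDIRQ           = 1 << 3,
        TRACE_FLAG_SOFTIRQ           = 1 << 4,
    };

    static unsigned char pack_flags(int irqs_off, int need_resched,
                                    int need_resched_lazy, int in_hardirq,
                                    int in_softirq)
    {
        return (irqs_off          ? TRACE_FLAG_IRQS_OFF          : 0) |
               (need_resched      ? TRACE_FLAG_NEED_RESCHED      : 0) |
               (need_resched_lazy ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
               (in_hardirq        ? TRACE_FLAG_HARDIRQ           : 0) |
               (in_softirq        ? TRACE_FLAG_SOFTIRQ           : 0);
    }

    int main(void)
    {
        unsigned char f = pack_flags(1, 0, 1, 0, 0);

        printf("flags=0x%02x lazy=%d\n", f,
               !!(f & TRACE_FLAG_NEED_RESCHED_LAZY));
        return 0;
    }
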
-@@ -2892,14 +2896,17 @@ get_total_entries(struct trace_buffer *buf,
+@@ -2898,14 +2902,17 @@ get_total_entries(struct trace_buffer *buf,
static void print_lat_help_header(struct seq_file *m)
{
@@ -22675,7 +23406,7 @@ index 83c60f9013cb..6fb207964a84 100644
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2925,11 +2932,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
+@@ -2931,11 +2938,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -22884,19 +23615,30 @@ index b069ccbfb0b0..1a2e88e98b5e 100644
struct user_struct *alloc_uid(kuid_t uid)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
-index 6d1020c03d41..70c6a2f79f7e 100644
+index 63177be0159e..59fe007ad496 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -315,6 +315,8 @@ static int is_softlockup(unsigned long touch_ts)
-
- #ifdef CONFIG_HARDLOCKUP_DETECTOR
+@@ -381,6 +381,7 @@ static void watchdog_enable(unsigned int cpu)
+ /* kick off the timer for the hardlockup detector */
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
++ hrtimer->irqsafe = 1;
+ /* Enable the perf event */
+ watchdog_nmi_enable(cpu);
+diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
+index 12b8dd640786..4c90d2ee7433 100644
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -19,6 +19,7 @@
+ static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-+
- static struct perf_event_attr wd_hw_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
-@@ -348,6 +350,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
+
+ /* boot commands */
+ /*
+@@ -104,6 +105,13 @@ static void watchdog_overflow_callback(struct perf_event *event,
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -22910,7 +23652,7 @@ index 6d1020c03d41..70c6a2f79f7e 100644
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
print_modules();
-@@ -365,6 +374,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
+@@ -121,6 +129,7 @@ static void watchdog_overflow_callback(struct perf_event *event,
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();
@@ -22918,16 +23660,8 @@ index 6d1020c03d41..70c6a2f79f7e 100644
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
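
The watchdog_hld.c hunk above wraps the multi-line hard-lockup report in a raw spinlock so reports from different CPUs do not interleave. The same serialize-the-whole-report pattern in userspace (pthread mutex standing in for the raw spinlock):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;

    static void report_lockup(int cpu)
    {
        /* Hold the lock across the whole block so it prints as one unit. */
        pthread_mutex_lock(&output_lock);
        printf("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
        printf("  (registers and backtrace would follow as one block)\n");
        pthread_mutex_unlock(&output_lock);
    }

    static void *cpu_thread(void *arg)
    {
        report_lockup((int)(long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        pthread_create(&t1, NULL, cpu_thread, (void *)0L);
        pthread_create(&t2, NULL, cpu_thread, (void *)1L);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }
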
-@@ -512,6 +522,7 @@ static void watchdog_enable(unsigned int cpu)
- /* kick off the timer for the hardlockup detector */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-+ hrtimer->irqsafe = 1;
-
- /* Enable the perf event */
- watchdog_nmi_enable(cpu);
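
The watchdog hunks show the RT changes following the hard-lockup detector into kernel/watchdog_hld.c, which the 4.9.77 tree apparently split out of kernel/watchdog.c. Two techniques are visible: the watchdog hrtimer is marked irqsafe = 1 so it keeps firing from hard interrupt context on RT, and a raw spinlock (watchdog_output_lock) serializes the lockup report, since the overflow callback runs in NMI context where RT's sleeping spinlocks must not be taken. A hedged userspace model of the serialization:

    /* Userspace model of watchdog_output_lock: concurrent detectors
     * serialize their reports through one spinning lock.  Spinning is the
     * point -- the real callback runs in NMI context, where sleeping
     * (RT-substituted) locks are forbidden. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t output_lock;

    static void report_lockup(int cpu)
    {
        pthread_spin_lock(&output_lock);
        fprintf(stderr, "Watchdog detected hard LOCKUP on cpu %d\n", cpu);
        pthread_spin_unlock(&output_lock);
    }

    int main(void)
    {
        pthread_spin_init(&output_lock, PTHREAD_PROCESS_PRIVATE);
        report_lockup(0);
        pthread_spin_destroy(&output_lock);
        return 0;
    }
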
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
-index 479d840db286..24eba6620a45 100644
+index 181c2ad0cb54..7eed129f114a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,6 +48,8 @@
@@ -22939,7 +23673,7 @@ index 479d840db286..24eba6620a45 100644
#include "workqueue_internal.h"
-@@ -121,11 +123,16 @@ enum {
+@@ -122,11 +124,16 @@ enum {
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
@@ -22957,7 +23691,7 @@ index 479d840db286..24eba6620a45 100644
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
-@@ -134,7 +141,7 @@ enum {
+@@ -135,7 +142,7 @@ enum {
*
* WQ: wq->mutex protected.
*
@@ -22984,7 +23718,7 @@ index 479d840db286..24eba6620a45 100644
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -348,6 +355,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
+@@ -349,6 +356,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
@@ -22993,7 +23727,7 @@ index 479d840db286..24eba6620a45 100644
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -355,20 +364,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+@@ -356,20 +365,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
@@ -23020,7 +23754,7 @@ index 479d840db286..24eba6620a45 100644
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -380,7 +389,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+@@ -381,7 +390,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -23029,7 +23763,7 @@ index 479d840db286..24eba6620a45 100644
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -412,7 +421,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+@@ -413,7 +422,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -23038,7 +23772,7 @@ index 479d840db286..24eba6620a45 100644
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -424,6 +433,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+@@ -425,6 +434,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
@@ -23070,7 +23804,7 @@ index 479d840db286..24eba6620a45 100644
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -548,7 +582,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
+@@ -549,7 +583,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* @wq: the target workqueue
* @node: the node ID
*
@@ -23079,7 +23813,7 @@ index 479d840db286..24eba6620a45 100644
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -692,8 +726,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
+@@ -693,8 +727,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -23090,7 +23824,7 @@ index 479d840db286..24eba6620a45 100644
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -830,50 +864,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
+@@ -831,50 +865,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -23159,7 +23893,7 @@ index 479d840db286..24eba6620a45 100644
struct worker_pool *pool;
/*
-@@ -882,29 +911,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
+@@ -883,29 +912,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -23199,7 +23933,7 @@ index 479d840db286..24eba6620a45 100644
}
/**
-@@ -1098,12 +1124,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
+@@ -1099,12 +1125,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
if (pwq) {
/*
@@ -23217,7 +23951,7 @@ index 479d840db286..24eba6620a45 100644
}
}
-@@ -1207,7 +1235,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1208,7 +1236,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -23226,7 +23960,7 @@ index 479d840db286..24eba6620a45 100644
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1226,6 +1254,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1227,6 +1255,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -23234,7 +23968,7 @@ index 479d840db286..24eba6620a45 100644
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1264,14 +1293,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
+@@ -1265,14 +1294,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -23253,7 +23987,7 @@ index 479d840db286..24eba6620a45 100644
return -EAGAIN;
}
-@@ -1373,7 +1404,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1374,7 +1405,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -23262,7 +23996,7 @@ index 479d840db286..24eba6620a45 100644
debug_work_activate(work);
-@@ -1381,6 +1412,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1382,6 +1413,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -23270,7 +24004,7 @@ index 479d840db286..24eba6620a45 100644
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1437,10 +1469,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1438,10 +1470,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -23283,7 +24017,7 @@ index 479d840db286..24eba6620a45 100644
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1458,7 +1488,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
+@@ -1459,7 +1489,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
insert_work(pwq, work, worklist, work_flags);
@@ -23293,7 +24027,7 @@ index 479d840db286..24eba6620a45 100644
}
/**
-@@ -1478,14 +1510,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1479,14 +1511,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
@@ -23310,7 +24044,7 @@ index 479d840db286..24eba6620a45 100644
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1552,14 +1584,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1554,14 +1586,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
unsigned long flags;
/* read the comment in __queue_work() */
@@ -23327,7 +24061,7 @@ index 479d840db286..24eba6620a45 100644
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1594,7 +1626,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+@@ -1596,7 +1628,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -23336,7 +24070,7 @@ index 479d840db286..24eba6620a45 100644
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -1627,7 +1659,9 @@ static void worker_enter_idle(struct worker *worker)
+@@ -1629,7 +1661,9 @@ static void worker_enter_idle(struct worker *worker)
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -23346,7 +24080,7 @@ index 479d840db286..24eba6620a45 100644
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1660,7 +1694,9 @@ static void worker_leave_idle(struct worker *worker)
+@@ -1662,7 +1696,9 @@ static void worker_leave_idle(struct worker *worker)
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -23356,7 +24090,7 @@ index 479d840db286..24eba6620a45 100644
}
static struct worker *alloc_worker(int node)
-@@ -1826,7 +1862,9 @@ static void destroy_worker(struct worker *worker)
+@@ -1828,7 +1864,9 @@ static void destroy_worker(struct worker *worker)
pool->nr_workers--;
pool->nr_idle--;
@@ -23366,7 +24100,7 @@ index 479d840db286..24eba6620a45 100644
worker->flags |= WORKER_DIE;
wake_up_process(worker->task);
}
-@@ -2785,14 +2823,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+@@ -2780,14 +2818,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
might_sleep();
@@ -23384,7 +24118,7 @@ index 479d840db286..24eba6620a45 100644
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2821,10 +2859,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+@@ -2816,10 +2854,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
@@ -23397,7 +24131,7 @@ index 479d840db286..24eba6620a45 100644
return false;
}
-@@ -2911,7 +2950,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+@@ -2906,7 +2945,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -23406,7 +24140,7 @@ index 479d840db286..24eba6620a45 100644
flush_work(work);
clear_work_data(work);
-@@ -2966,10 +3005,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -2961,10 +3000,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -23419,7 +24153,7 @@ index 479d840db286..24eba6620a45 100644
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -2987,7 +3026,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
+@@ -2982,7 +3021,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
@@ -23428,7 +24162,7 @@ index 479d840db286..24eba6620a45 100644
return ret;
}
-@@ -3245,7 +3284,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
+@@ -3239,7 +3278,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -23437,7 +24171,7 @@ index 479d840db286..24eba6620a45 100644
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3299,8 +3338,8 @@ static void put_unbound_pool(struct worker_pool *pool)
+@@ -3293,8 +3332,8 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -23448,7 +24182,7 @@ index 479d840db286..24eba6620a45 100644
}
/**
-@@ -3407,14 +3446,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
+@@ -3401,14 +3440,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -23465,7 +24199,7 @@ index 479d840db286..24eba6620a45 100644
}
/**
-@@ -4064,7 +4103,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
+@@ -4072,7 +4111,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -23474,7 +24208,7 @@ index 479d840db286..24eba6620a45 100644
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4157,7 +4196,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+@@ -4166,7 +4205,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
struct pool_workqueue *pwq;
bool ret;
@@ -23484,7 +24218,7 @@ index 479d840db286..24eba6620a45 100644
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4168,7 +4208,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
+@@ -4177,7 +4217,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -23494,7 +24228,7 @@ index 479d840db286..24eba6620a45 100644
return ret;
}
-@@ -4194,15 +4235,15 @@ unsigned int work_busy(struct work_struct *work)
+@@ -4203,15 +4244,15 @@ unsigned int work_busy(struct work_struct *work)
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -23514,7 +24248,7 @@ index 479d840db286..24eba6620a45 100644
return ret;
}
-@@ -4391,7 +4432,7 @@ void show_workqueue_state(void)
+@@ -4400,7 +4441,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -23523,7 +24257,7 @@ index 479d840db286..24eba6620a45 100644
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4444,7 +4485,7 @@ void show_workqueue_state(void)
+@@ -4453,7 +4494,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -23532,7 +24266,7 @@ index 479d840db286..24eba6620a45 100644
}
/*
-@@ -4782,16 +4823,16 @@ bool freeze_workqueues_busy(void)
+@@ -4791,16 +4832,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -23552,7 +24286,7 @@ index 479d840db286..24eba6620a45 100644
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -4981,7 +5022,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+@@ -4990,7 +5031,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
const char *delim = "";
int node, written = 0;
@@ -23562,7 +24296,7 @@ index 479d840db286..24eba6620a45 100644
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -4989,7 +5031,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
+@@ -4998,7 +5040,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
@@ -23573,10 +24307,10 @@ index 479d840db286..24eba6620a45 100644
return written;
}
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
-index 8635417c587b..f000c4d6917e 100644
+index 29fa81f0f51a..42d1e3974554 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
-@@ -43,6 +43,7 @@ struct worker {
+@@ -44,6 +44,7 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
@@ -23584,7 +24318,7 @@ index 8635417c587b..f000c4d6917e 100644
/*
* Opaque string set with work_set_desc(). Printed out with task
-@@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void)
+@@ -69,7 +70,7 @@ static inline struct worker *current_wq_worker(void)
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/core.c and workqueue.c.
*/
@@ -24248,7 +24982,7 @@ index 50b4ca6787f0..77518a3b35a1 100644
unsigned int nr_free_highpages (void)
{
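
The workqueue hunks earlier in this section are mostly renumbered context, but they belong to the RT rework that, in the full patch, moves pool and pool_workqueue lookups from sched-RCU to plain RCU (rcu_read_lock() and call_rcu() instead of the _sched variants) and puts the irq-off queueing paths under a named local lock. The reason is that on RT, disabling interrupts or preemption no longer implies an RCU-sched read side, so the read-side marking has to be explicit. A hand-rolled, single-threaded stand-in for the publish/retire contract the readers now rely on (real RCU is far more scalable; this only illustrates the shape):

    /* Toy model of rcu_read_lock()/call_rcu(): readers mark their read
     * side, the updater publishes a new version and must not free the old
     * one until readers are done.  Illustrative only. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static _Atomic int readers;
    static _Atomic(int *) shared;

    static void reader(void)
    {
        atomic_fetch_add(&readers, 1);              /* rcu_read_lock()   */
        int *p = atomic_load(&shared);
        if (p)
            printf("reader saw %d\n", *p);
        atomic_fetch_sub(&readers, 1);              /* rcu_read_unlock() */
    }

    static void update(int val)
    {
        int *newp = malloc(sizeof(*newp));
        *newp = val;
        int *old = atomic_exchange(&shared, newp);  /* publish */
        while (atomic_load(&readers) != 0)
            ;                                       /* synchronize_rcu() stand-in */
        free(old);                                  /* what call_rcu() defers */
    }

    int main(void)
    {
        update(1);
        reader();
        update(2);
        reader();
        free(atomic_load(&shared));
        return 0;
    }
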
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 47559cc0cdcc..1f2ebc924916 100644
+index 2a800c4a39bd..c04403033aec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -67,6 +67,7 @@
@@ -24268,67 +25002,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -1692,6 +1695,7 @@ struct memcg_stock_pcp {
- #define FLUSHING_CACHED_CHARGE 0
- };
- static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-+static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
- static DEFINE_MUTEX(percpu_charge_mutex);
-
- /**
-@@ -1714,7 +1718,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- if (nr_pages > CHARGE_BATCH)
- return ret;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(memcg_stock_ll, flags);
-
- stock = this_cpu_ptr(&memcg_stock);
- if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -1722,7 +1726,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- ret = true;
- }
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(memcg_stock_ll, flags);
-
- return ret;
- }
-@@ -1749,13 +1753,13 @@ static void drain_local_stock(struct work_struct *dummy)
- struct memcg_stock_pcp *stock;
- unsigned long flags;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(memcg_stock_ll, flags);
-
- stock = this_cpu_ptr(&memcg_stock);
- drain_stock(stock);
- clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(memcg_stock_ll, flags);
- }
-
- /*
-@@ -1767,7 +1771,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- struct memcg_stock_pcp *stock;
- unsigned long flags;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(memcg_stock_ll, flags);
-
- stock = this_cpu_ptr(&memcg_stock);
- if (stock->cached != memcg) { /* reset if necessary */
-@@ -1776,7 +1780,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
- }
- stock->nr_pages += nr_pages;
-
-- local_irq_restore(flags);
-+ local_unlock_irqrestore(memcg_stock_ll, flags);
- }
-
- /*
-@@ -1792,7 +1796,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+@@ -1795,7 +1798,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -24337,7 +25011,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1809,7 +1813,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+@@ -1812,7 +1815,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
schedule_work_on(cpu, &stock->work);
}
}
@@ -24346,7 +25020,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
put_online_cpus();
mutex_unlock(&percpu_charge_mutex);
}
-@@ -4555,12 +4559,12 @@ static int mem_cgroup_move_account(struct page *page,
+@@ -4558,12 +4561,12 @@ static int mem_cgroup_move_account(struct page *page,
ret = 0;
@@ -24361,7 +25035,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
out_unlock:
unlock_page(page);
out:
-@@ -5435,10 +5439,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
+@@ -5438,10 +5441,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
commit_charge(page, memcg, lrucare);
@@ -24374,7 +25048,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5494,14 +5498,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
+@@ -5497,14 +5500,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
memcg_oom_recover(memcg);
}
@@ -24391,7 +25065,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5656,10 +5660,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+@@ -5659,10 +5662,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
commit_charge(newpage, memcg, false);
@@ -24404,7 +25078,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -5850,6 +5854,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+@@ -5853,6 +5856,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
@@ -24412,7 +25086,7 @@ index 47559cc0cdcc..1f2ebc924916 100644
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5890,12 +5895,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+@@ -5893,12 +5897,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
@@ -24450,7 +25124,7 @@ index 6f4d27c5bb32..5cd25c745a8f 100644
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
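
Several memcontrol hunks from the old patch revision disappear above (the outer '-' block around consume_stock(), drain_local_stock() and refill_stock()), presumably dropped or relocated because the surrounding stock code changed between 4.9.71 and 4.9.77. The surviving hunks still take memcg_stock_ll, a local lock guarding the per-CPU charge stock: a named per-CPU lock replaces an anonymous local_irq_save() so that RT can substitute a sleeping per-CPU lock while !RT keeps plain irq-off sections. A userspace sketch under those assumptions; the field names loosely follow the memcg stock, not exactly:

    /* local-lock pattern behind memcg_stock_ll, modeled with a mutex
     * standing in for one CPU's lock. */
    #include <pthread.h>
    #include <stdio.h>

    struct stock {
        int           cached;     /* id of the group the stock belongs to */
        unsigned long nr_pages;   /* pre-charged pages available */
    };

    static pthread_mutex_t stock_ll = PTHREAD_MUTEX_INITIALIZER;
    static struct stock pcp_stock;

    static int consume_stock(int memcg, unsigned long nr_pages)
    {
        int ret = 0;

        pthread_mutex_lock(&stock_ll);    /* was: local_irq_save(flags)    */
        if (pcp_stock.cached == memcg && pcp_stock.nr_pages >= nr_pages) {
            pcp_stock.nr_pages -= nr_pages;
            ret = 1;
        }
        pthread_mutex_unlock(&stock_ll);  /* was: local_irq_restore(flags) */
        return ret;
    }

    int main(void)
    {
        pcp_stock.cached = 42;
        pcp_stock.nr_pages = 8;
        printf("consumed: %d\n", consume_stock(42, 4));
        printf("consumed: %d\n", consume_stock(7, 1));
        return 0;
    }
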
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 5b06fb385dd7..86457a2fca20 100644
+index fbc38888252b..1cb08e1406ea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
@@ -24480,7 +25154,7 @@ index 5b06fb385dd7..86457a2fca20 100644
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1072,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
+@@ -1092,7 +1105,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
#endif /* CONFIG_DEBUG_VM */
/*
@@ -24489,7 +25163,7 @@ index 5b06fb385dd7..86457a2fca20 100644
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1083,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
+@@ -1103,19 +1116,58 @@ static bool bulkfree_pcp_prepare(struct page *page)
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -24552,7 +25226,7 @@ index 5b06fb385dd7..86457a2fca20 100644
while (count) {
struct page *page;
struct list_head *list;
-@@ -1111,7 +1163,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+@@ -1131,7 +1183,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -24561,7 +25235,7 @@ index 5b06fb385dd7..86457a2fca20 100644
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -1119,27 +1171,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
+@@ -1139,27 +1191,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
batch_free = count;
do {
@@ -24590,7 +25264,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
static void free_one_page(struct zone *zone,
-@@ -1148,7 +1185,9 @@ static void free_one_page(struct zone *zone,
+@@ -1168,7 +1205,9 @@ static void free_one_page(struct zone *zone,
int migratetype)
{
unsigned long nr_scanned;
@@ -24601,7 +25275,7 @@ index 5b06fb385dd7..86457a2fca20 100644
nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-@@ -1158,7 +1197,7 @@ static void free_one_page(struct zone *zone,
+@@ -1178,7 +1217,7 @@ static void free_one_page(struct zone *zone,
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -24610,7 +25284,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-@@ -1244,10 +1283,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+@@ -1264,10 +1303,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -24623,7 +25297,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2246,16 +2285,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+@@ -2282,16 +2321,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -24645,7 +25319,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
#endif
-@@ -2271,16 +2312,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+@@ -2307,16 +2348,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -24671,7 +25345,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
/*
-@@ -2366,8 +2412,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2402,8 +2448,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -24689,7 +25363,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
#ifdef CONFIG_HIBERNATION
-@@ -2427,7 +2482,7 @@ void free_hot_cold_page(struct page *page, bool cold)
+@@ -2463,7 +2518,7 @@ void free_hot_cold_page(struct page *page, bool cold)
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -24698,7 +25372,7 @@ index 5b06fb385dd7..86457a2fca20 100644
__count_vm_event(PGFREE);
/*
-@@ -2453,12 +2508,17 @@ void free_hot_cold_page(struct page *page, bool cold)
+@@ -2489,12 +2544,17 @@ void free_hot_cold_page(struct page *page, bool cold)
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -24718,7 +25392,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
/*
-@@ -2600,7 +2660,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2629,7 +2689,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -24727,7 +25401,7 @@ index 5b06fb385dd7..86457a2fca20 100644
do {
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
-@@ -2627,7 +2687,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2656,7 +2716,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -24736,7 +25410,7 @@ index 5b06fb385dd7..86457a2fca20 100644
do {
page = NULL;
-@@ -2639,22 +2699,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
+@@ -2668,22 +2728,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (!page)
page = __rmqueue(zone, order, migratetype);
} while (page && check_new_pages(page, order));
@@ -24765,7 +25439,7 @@ index 5b06fb385dd7..86457a2fca20 100644
return NULL;
}
-@@ -6532,7 +6594,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6561,7 +6623,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
@@ -24775,7 +25449,7 @@ index 5b06fb385dd7..86457a2fca20 100644
drain_pages(cpu);
/*
-@@ -6558,6 +6622,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
+@@ -6587,6 +6651,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -24783,7 +25457,7 @@ index 5b06fb385dd7..86457a2fca20 100644
}
/*
-@@ -7386,7 +7451,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7422,7 +7487,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -24792,7 +25466,7 @@ index 5b06fb385dd7..86457a2fca20 100644
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7395,7 +7460,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7431,7 +7496,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
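
The page_alloc.c hunks move the per-CPU pageset protection from bare local_irq_save() onto a local lock (pa_lock in the full RT patch) and appear to rework free_pcppages_bulk() so that pages are first isolated onto a caller-local list while the lock is held and only afterwards handed back to the buddy allocator, shrinking the lock hold time. A userspace model of that isolate-then-free pattern:

    /* Isolate under the lock, free after unlocking; intrusive singly
     * linked list, names illustrative. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct page { struct page *next; int id; };

    static pthread_mutex_t pcp_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct page *pcp_list;                 /* "per-CPU pages" */

    static struct page *isolate_pcp_pages(int count)
    {
        struct page *batch = NULL;

        pthread_mutex_lock(&pcp_lock);
        while (count-- > 0 && pcp_list) {
            struct page *p = pcp_list;
            pcp_list = p->next;
            p->next = batch;                      /* move to private list */
            batch = p;
        }
        pthread_mutex_unlock(&pcp_lock);
        return batch;
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct page *p = malloc(sizeof(*p));
            p->id = i;
            p->next = pcp_list;
            pcp_list = p;
        }
        for (struct page *p = isolate_pcp_pages(3), *n; p; p = n) {
            n = p->next;
            printf("freeing page %d outside the lock\n", p->id);
            free(p);                              /* "give back to buddy" */
        }
        for (struct page *p = pcp_list, *n; p; p = n) {
            n = p->next;
            free(p);
        }
        return 0;
    }
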
@@ -24805,60 +25479,67 @@ diff --git a/mm/percpu.c b/mm/percpu.c
index f014cebbf405..4e739fcf91bf 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
-@@ -1283,6 +1283,31 @@ void free_percpu(void __percpu *ptr)
+@@ -1283,18 +1283,7 @@ void free_percpu(void __percpu *ptr)
}
EXPORT_SYMBOL_GPL(free_percpu);
+-/**
+- * is_kernel_percpu_address - test whether address is from static percpu area
+- * @addr: address to test
+- *
+- * Test whether @addr belongs to in-kernel static percpu area. Module
+- * static percpu areas are not considered. For those, use
+- * is_module_percpu_address().
+- *
+- * RETURNS:
+- * %true if @addr is from in-kernel static percpu area, %false otherwise.
+- */
+-bool is_kernel_percpu_address(unsigned long addr)
+bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
-+{
-+#ifdef CONFIG_SMP
-+ const size_t static_size = __per_cpu_end - __per_cpu_start;
-+ void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
-+ unsigned int cpu;
-+
-+ for_each_possible_cpu(cpu) {
-+ void *start = per_cpu_ptr(base, cpu);
+ {
+ #ifdef CONFIG_SMP
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
+@@ -1303,15 +1292,38 @@ bool is_kernel_percpu_address(unsigned long addr)
+
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(base, cpu);
+ void *va = (void *)addr;
-+
+
+- if ((void *)addr >= start && (void *)addr < start + static_size)
+ if (va >= start && va < start + static_size) {
+ if (can_addr) {
+ *can_addr = (unsigned long) (va - start);
+ *can_addr += (unsigned long)
+ per_cpu_ptr(base, get_boot_cpu_id());
+ }
-+ return true;
+ return true;
+- }
+ }
+ }
-+#endif
-+ /* on UP, can't distinguish from other static vars, always false */
-+ return false;
-+}
-+
- /**
- * is_kernel_percpu_address - test whether address is from static percpu area
- * @addr: address to test
-@@ -1296,20 +1321,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
- */
- bool is_kernel_percpu_address(unsigned long addr)
- {
--#ifdef CONFIG_SMP
-- const size_t static_size = __per_cpu_end - __per_cpu_start;
-- void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
-- unsigned int cpu;
--
-- for_each_possible_cpu(cpu) {
-- void *start = per_cpu_ptr(base, cpu);
--
-- if ((void *)addr >= start && (void *)addr < start + static_size)
-- return true;
-- }
--#endif
-- /* on UP, can't distinguish from other static vars, always false */
-- return false;
-+ return __is_kernel_percpu_address(addr, NULL);
+ #endif
+ /* on UP, can't distinguish from other static vars, always false */
+ return false;
}
++/**
++ * is_kernel_percpu_address - test whether address is from static percpu area
++ * @addr: address to test
++ *
++ * Test whether @addr belongs to in-kernel static percpu area. Module
++ * static percpu areas are not considered. For those, use
++ * is_module_percpu_address().
++ *
++ * RETURNS:
++ * %true if @addr is from in-kernel static percpu area, %false otherwise.
++ */
++bool is_kernel_percpu_address(unsigned long addr)
++{
++ return __is_kernel_percpu_address(addr, NULL);
++}
++
/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
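
The percpu.c change is fully visible above: is_kernel_percpu_address() becomes a wrapper around a new __is_kernel_percpu_address() that can also report a canonical address — the offset within the per-CPU area rebased onto the boot CPU's copy — presumably so that RT lockdep can treat all per-CPU instances of a lock as a single class. A self-contained sketch of the translation arithmetic; the kernel walks its real per-CPU chunks instead of a toy array:

    /* Find which per-CPU copy an address falls into and rebase its offset
     * onto copy 0, yielding one canonical address for all copies. */
    #include <stdio.h>

    #define NCPU 4
    #define AREA_SIZE 64

    static char area[NCPU][AREA_SIZE];   /* stand-in for per-CPU areas */

    static int canonical_percpu_addr(const char *addr, const char **can_addr)
    {
        for (int cpu = 0; cpu < NCPU; cpu++) {
            const char *start = area[cpu];

            if (addr >= start && addr < start + AREA_SIZE) {
                /* rebase onto "CPU 0", the boot-CPU stand-in */
                *can_addr = area[0] + (addr - start);
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        const char *can;

        if (canonical_percpu_addr(&area[2][10], &can))
            printf("canonical offset: %td\n", can - area[0]);
        return 0;
    }
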
diff --git a/mm/slab.h b/mm/slab.h
index ceb7d70cdb76..dfd281e43fbe 100644
--- a/mm/slab.h
@@ -24876,10 +25557,10 @@ index ceb7d70cdb76..dfd281e43fbe 100644
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
diff --git a/mm/slub.c b/mm/slub.c
-index 58c7526f8de2..6d72b7f87129 100644
+index edc79ca3c6d5..67eb368b9314 100644
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1141,7 +1141,7 @@ static noinline int free_debug_processing(
+@@ -1144,7 +1144,7 @@ static noinline int free_debug_processing(
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -24888,7 +25569,7 @@ index 58c7526f8de2..6d72b7f87129 100644
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1176,7 +1176,7 @@ static noinline int free_debug_processing(
+@@ -1179,7 +1179,7 @@ static noinline int free_debug_processing(
bulk_cnt, cnt);
slab_unlock(page);
@@ -24897,7 +25578,7 @@ index 58c7526f8de2..6d72b7f87129 100644
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
+@@ -1307,6 +1307,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
#endif /* CONFIG_SLUB_DEBUG */
@@ -24910,7 +25591,7 @@ index 58c7526f8de2..6d72b7f87129 100644
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1527,10 +1533,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1530,10 +1536,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
void *start, *p;
int idx, order;
bool shuffle;
@@ -24928,7 +25609,7 @@ index 58c7526f8de2..6d72b7f87129 100644
local_irq_enable();
flags |= s->allocflags;
-@@ -1605,7 +1618,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1608,7 +1621,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
page->frozen = 1;
out:
@@ -24937,7 +25618,7 @@ index 58c7526f8de2..6d72b7f87129 100644
local_irq_disable();
if (!page)
return NULL;
-@@ -1664,6 +1677,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
+@@ -1667,6 +1680,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
__free_pages(page, order);
}
@@ -24954,7 +25635,7 @@ index 58c7526f8de2..6d72b7f87129 100644
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1695,6 +1718,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
+@@ -1698,6 +1721,12 @@ static void free_slab(struct kmem_cache *s, struct page *page)
}
call_rcu(head, rcu_free_slab);
@@ -24967,7 +25648,7 @@ index 58c7526f8de2..6d72b7f87129 100644
} else
__free_slab(s, page);
}
-@@ -1802,7 +1831,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -1805,7 +1834,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
if (!n || !n->nr_partial)
return NULL;
@@ -24976,7 +25657,7 @@ index 58c7526f8de2..6d72b7f87129 100644
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1827,7 +1856,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+@@ -1830,7 +1859,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
break;
}
@@ -24985,7 +25666,7 @@ index 58c7526f8de2..6d72b7f87129 100644
return object;
}
-@@ -2073,7 +2102,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2076,7 +2105,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -24994,7 +25675,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
} else {
m = M_FULL;
-@@ -2084,7 +2113,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2087,7 +2116,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -25003,7 +25684,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
}
-@@ -2119,7 +2148,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+@@ -2122,7 +2151,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
goto redo;
if (lock)
@@ -25012,7 +25693,7 @@ index 58c7526f8de2..6d72b7f87129 100644
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2151,10 +2180,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2154,10 +2183,10 @@ static void unfreeze_partials(struct kmem_cache *s,
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -25025,7 +25706,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
do {
-@@ -2183,7 +2212,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+@@ -2186,7 +2215,7 @@ static void unfreeze_partials(struct kmem_cache *s,
}
if (n)
@@ -25034,7 +25715,7 @@ index 58c7526f8de2..6d72b7f87129 100644
while (discard_page) {
page = discard_page;
-@@ -2222,14 +2251,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+@@ -2225,14 +2254,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -25056,7 +25737,7 @@ index 58c7526f8de2..6d72b7f87129 100644
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2301,7 +2337,22 @@ static bool has_cpu_slab(int cpu, void *info)
+@@ -2304,7 +2340,22 @@ static bool has_cpu_slab(int cpu, void *info)
static void flush_all(struct kmem_cache *s)
{
@@ -25079,7 +25760,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
/*
-@@ -2356,10 +2407,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+@@ -2359,10 +2410,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
unsigned long x = 0;
struct page *page;
@@ -25092,7 +25773,7 @@ index 58c7526f8de2..6d72b7f87129 100644
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2497,8 +2548,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+@@ -2500,8 +2551,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -25104,7 +25785,7 @@ index 58c7526f8de2..6d72b7f87129 100644
void *freelist;
struct page *page;
-@@ -2558,6 +2611,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2561,6 +2614,13 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -25118,7 +25799,7 @@ index 58c7526f8de2..6d72b7f87129 100644
return freelist;
new_slab:
-@@ -2589,7 +2649,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2592,7 +2652,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
@@ -25127,7 +25808,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
/*
-@@ -2601,6 +2661,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2604,6 +2664,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
{
void *p;
unsigned long flags;
@@ -25135,7 +25816,7 @@ index 58c7526f8de2..6d72b7f87129 100644
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2612,8 +2673,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+@@ -2615,8 +2676,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -25146,7 +25827,7 @@ index 58c7526f8de2..6d72b7f87129 100644
return p;
}
-@@ -2799,7 +2861,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2802,7 +2864,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
do {
if (unlikely(n)) {
@@ -25155,7 +25836,7 @@ index 58c7526f8de2..6d72b7f87129 100644
n = NULL;
}
prior = page->freelist;
-@@ -2831,7 +2893,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2834,7 +2896,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -25164,7 +25845,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
}
-@@ -2873,7 +2935,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2876,7 +2938,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -25173,7 +25854,7 @@ index 58c7526f8de2..6d72b7f87129 100644
return;
slab_empty:
-@@ -2888,7 +2950,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+@@ -2891,7 +2953,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
remove_full(s, n, page);
}
@@ -25182,7 +25863,7 @@ index 58c7526f8de2..6d72b7f87129 100644
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3093,6 +3155,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3096,6 +3158,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
void **p)
{
struct kmem_cache_cpu *c;
@@ -25190,7 +25871,7 @@ index 58c7526f8de2..6d72b7f87129 100644
int i;
/* memcg and kmem_cache debug support */
-@@ -3116,7 +3179,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3119,7 +3182,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -25199,7 +25880,7 @@ index 58c7526f8de2..6d72b7f87129 100644
if (unlikely(!p[i]))
goto error;
-@@ -3128,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+@@ -3131,6 +3194,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -25207,7 +25888,7 @@ index 58c7526f8de2..6d72b7f87129 100644
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3275,7 +3339,7 @@ static void
+@@ -3278,7 +3342,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -25216,7 +25897,7 @@ index 58c7526f8de2..6d72b7f87129 100644
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3619,6 +3683,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
+@@ -3622,6 +3686,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -25227,7 +25908,7 @@ index 58c7526f8de2..6d72b7f87129 100644
void *addr = page_address(page);
void *p;
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3639,6 +3707,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
+@@ -3642,6 +3710,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
slab_unlock(page);
kfree(map);
#endif
@@ -25235,7 +25916,7 @@ index 58c7526f8de2..6d72b7f87129 100644
}
/*
-@@ -3652,7 +3721,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+@@ -3655,7 +3724,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -25244,7 +25925,7 @@ index 58c7526f8de2..6d72b7f87129 100644
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3662,7 +3731,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+@@ -3665,7 +3734,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
@@ -25253,7 +25934,7 @@ index 58c7526f8de2..6d72b7f87129 100644
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3905,7 +3974,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+@@ -3908,7 +3977,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -25262,7 +25943,7 @@ index 58c7526f8de2..6d72b7f87129 100644
/*
* Build lists of slabs to discard or promote.
-@@ -3936,7 +4005,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+@@ -3939,7 +4008,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -25271,7 +25952,7 @@ index 58c7526f8de2..6d72b7f87129 100644
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4112,6 +4181,12 @@ void __init kmem_cache_init(void)
+@@ -4115,6 +4184,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -25284,7 +25965,7 @@ index 58c7526f8de2..6d72b7f87129 100644
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4320,7 +4395,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4323,7 +4398,7 @@ static int validate_slab_node(struct kmem_cache *s,
struct page *page;
unsigned long flags;
@@ -25293,7 +25974,7 @@ index 58c7526f8de2..6d72b7f87129 100644
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4342,7 +4417,7 @@ static int validate_slab_node(struct kmem_cache *s,
+@@ -4345,7 +4420,7 @@ static int validate_slab_node(struct kmem_cache *s,
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -25302,7 +25983,7 @@ index 58c7526f8de2..6d72b7f87129 100644
return count;
}
-@@ -4530,12 +4605,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+@@ -4533,12 +4608,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
if (!atomic_long_read(&n->nr_slabs))
continue;
@@ -25523,7 +26204,7 @@ index 4dcf852e1e6d..69c3a5b24060 100644
put_online_cpus();
mutex_unlock(&lock);
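
Only renumbered context of the SLUB changes is visible above, but in the full RT patch they follow two themes: the irq-off fast paths are put behind named local locks, and slab freeing that cannot run in the current context (interrupts off on RT) is deferred through a per-CPU delayed-free list that is drained later from a safe context. A userspace model of that defer-and-flush idea, with illustrative names:

    /* Park objects that cannot be released here; drain them later. */
    #include <stdio.h>
    #include <stdlib.h>

    struct slab { struct slab *next; int id; };

    static struct slab *delayed_free;    /* stand-in for the per-CPU list */

    static void free_slab(struct slab *s, int unsafe_context)
    {
        if (unsafe_context) {            /* cannot free here: defer */
            s->next = delayed_free;
            delayed_free = s;
            return;
        }
        printf("freeing slab %d now\n", s->id);
        free(s);
    }

    static void flush_delayed_free(void)
    {
        while (delayed_free) {           /* safe context: drain backlog */
            struct slab *s = delayed_free;

            delayed_free = s->next;
            printf("freeing deferred slab %d\n", s->id);
            free(s);
        }
    }

    int main(void)
    {
        struct slab *a = malloc(sizeof(*a));
        struct slab *b = malloc(sizeof(*b));

        a->id = 1;
        b->id = 2;
        free_slab(a, 1);                 /* deferred */
        free_slab(b, 0);                 /* immediate */
        flush_delayed_free();
        return 0;
    }
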
diff --git a/mm/truncate.c b/mm/truncate.c
-index 8d8c62d89e6d..5bf1bd25d077 100644
+index 9c809e7d73c3..b7681e888ba0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -62,9 +62,12 @@ static void clear_exceptional_entry(struct address_space *mapping,
@@ -25542,10 +26223,10 @@ index 8d8c62d89e6d..5bf1bd25d077 100644
unlock:
spin_unlock_irq(&mapping->tree_lock);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index f2481cb4e6b2..db4de08fa97c 100644
+index 195de42bea1f..b46cb686fde7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+@@ -855,7 +855,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -25554,7 +26235,7 @@ index f2481cb4e6b2..db4de08fa97c 100644
void *vaddr;
node = numa_node_id();
-@@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+@@ -898,11 +898,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
BUG_ON(err);
radix_tree_preload_end();
@@ -25569,7 +26250,7 @@ index f2481cb4e6b2..db4de08fa97c 100644
return vaddr;
}
-@@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+@@ -971,6 +972,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -25577,7 +26258,7 @@ index f2481cb4e6b2..db4de08fa97c 100644
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+@@ -985,7 +987,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
order = get_order(size);
rcu_read_lock();
@@ -25587,7 +26268,7 @@ index f2481cb4e6b2..db4de08fa97c 100644
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+@@ -1008,7 +1011,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
break;
}
@@ -25597,7 +26278,7 @@ index f2481cb4e6b2..db4de08fa97c 100644
/* Allocate new block if nothing was found */
diff --git a/mm/vmstat.c b/mm/vmstat.c
-index 604f26a4f696..312006d2db50 100644
+index 6a088df04b29..abda95be88b4 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
@@ -25768,7 +26449,7 @@ index 4c4f05655e6e..b97b1e87b54c 100644
return ret;
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
-index 1689bb58e0d1..e52a8cb6aa5a 100644
+index d3548c48369f..8894f0749d8d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
@@ -25948,8 +26629,55 @@ index 1689bb58e0d1..e52a8cb6aa5a 100644
migrate_read_unlock(zspage);
unpin_tag(handle);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index c88a6007e643..5de85b55a821 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -251,15 +251,13 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
+ }
+
+ /* Send frame to sockets with specific channel */
+-void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+- int flag, struct sock *skip_sk)
++static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
++ int flag, struct sock *skip_sk)
+ {
+ struct sock *sk;
+
+ BT_DBG("channel %u len %d", channel, skb->len);
+
+- read_lock(&hci_sk_list.lock);
+-
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *nskb;
+
+@@ -285,6 +283,13 @@ void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
+ kfree_skb(nskb);
+ }
+
++}
++
++void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
++ int flag, struct sock *skip_sk)
++{
++ read_lock(&hci_sk_list.lock);
++ __hci_send_to_channel(channel, skb, flag, skip_sk);
+ read_unlock(&hci_sk_list.lock);
+ }
+
+@@ -388,8 +393,8 @@ void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ hdr->index = index;
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+- hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+- HCI_SOCK_TRUSTED, NULL);
++ __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
++ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
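
The hci_sock change above is self-contained: the body of hci_send_to_channel() moves into __hci_send_to_channel(), which assumes hci_sk_list.lock is already held, and hci_send_monitor_ctrl_event() — whose callers apparently hold that lock — delivers through the helper instead of re-taking the read lock. Recursive rwlock acquisition is fragile at best and deadlocks outright once RT substitutes its non-recursive lock types. A sketch of the wrapper/locked-helper split:

    /* Double-underscore helper assumes the lock; wrapper takes it. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void __send_to_channel(const char *msg)
    {
        /* caller must hold list_lock */
        printf("delivering: %s\n", msg);
    }

    static void send_to_channel(const char *msg)
    {
        pthread_rwlock_rdlock(&list_lock);
        __send_to_channel(msg);
        pthread_rwlock_unlock(&list_lock);
    }

    static void send_monitor_event(const char *msg)
    {
        /* the real caller already holds list_lock, so no re-lock here */
        __send_to_channel(msg);
    }

    int main(void)
    {
        send_to_channel("normal path");

        pthread_rwlock_rdlock(&list_lock);
        send_monitor_event("already-locked path");
        pthread_rwlock_unlock(&list_lock);
        return 0;
    }
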
diff --git a/net/core/dev.c b/net/core/dev.c
-index 2e04fd188081..3ba60ef8c79e 100644
+index 09007a71c8dd..6cb279747408 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS;
@@ -26052,7 +26780,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
}
/**
-@@ -2285,6 +2291,7 @@ static void __netif_reschedule(struct Qdisc *q)
+@@ -2287,6 +2293,7 @@ static void __netif_reschedule(struct Qdisc *q)
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -26060,7 +26788,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
}
void __netif_schedule(struct Qdisc *q)
-@@ -2366,6 +2373,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+@@ -2371,6 +2378,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -26068,7 +26796,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3100,7 +3108,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+@@ -3112,7 +3120,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
@@ -26080,7 +26808,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
if (unlikely(contended))
spin_lock(&q->busylock);
-@@ -3163,8 +3175,10 @@ static void skb_update_prio(struct sk_buff *skb)
+@@ -3175,8 +3187,10 @@ static void skb_update_prio(struct sk_buff *skb)
#define skb_update_prio(skb)
#endif
@@ -26091,7 +26819,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
/**
* dev_loopback_xmit - loop back @skb
-@@ -3398,8 +3412,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3410,8 +3424,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
@@ -26101,7 +26829,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3409,9 +3422,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+@@ -3421,9 +3434,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -26113,7 +26841,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
-@@ -3785,6 +3798,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+@@ -3797,6 +3810,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
rps_unlock(sd);
local_irq_restore(flags);
@@ -26121,7 +26849,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -3803,7 +3817,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -3815,7 +3829,7 @@ static int netif_rx_internal(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -26130,7 +26858,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3813,13 +3827,13 @@ static int netif_rx_internal(struct sk_buff *skb)
+@@ -3825,13 +3839,13 @@ static int netif_rx_internal(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
@@ -26147,7 +26875,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
}
return ret;
}
-@@ -3853,11 +3867,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3865,11 +3879,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
@@ -26161,7 +26889,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
return err;
}
-@@ -4336,7 +4348,7 @@ static void flush_backlog(struct work_struct *work)
+@@ -4348,7 +4360,7 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -26170,7 +26898,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
input_queue_head_incr(sd);
}
}
-@@ -4346,11 +4358,14 @@ static void flush_backlog(struct work_struct *work)
+@@ -4358,11 +4370,14 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -26186,7 +26914,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
}
static void flush_all_backlogs(void)
-@@ -4831,6 +4846,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -4853,6 +4868,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -26194,7 +26922,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4844,6 +4860,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+@@ -4866,6 +4882,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
} else
#endif
local_irq_enable();
@@ -26202,7 +26930,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4873,7 +4890,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4895,7 +4912,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
while (again) {
struct sk_buff *skb;
@@ -26212,7 +26940,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -4881,9 +4900,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4903,9 +4922,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
if (++work >= quota)
return work;
@@ -26223,7 +26951,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -4921,9 +4940,11 @@ void __napi_schedule(struct napi_struct *n)
+@@ -4943,9 +4962,11 @@ void __napi_schedule(struct napi_struct *n)
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -26235,7 +26963,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -4935,6 +4956,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
+@@ -4957,6 +4978,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
@@ -26243,7 +26971,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
void __napi_complete(struct napi_struct *n)
{
-@@ -5224,13 +5246,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+@@ -5246,13 +5268,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -26265,7 +26993,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
for (;;) {
struct napi_struct *n;
-@@ -5261,7 +5291,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+@@ -5283,7 +5313,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
@@ -26274,7 +27002,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
net_rps_action_and_irq_enable(sd);
}
-@@ -8022,16 +8052,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
+@@ -8045,16 +8075,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
@@ -26296,7 +27024,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
return NOTIFY_OK;
}
-@@ -8336,8 +8370,9 @@ static int __init net_dev_init(void)
+@@ -8359,8 +8393,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
@@ -26309,7 +27037,7 @@ index 2e04fd188081..3ba60ef8c79e 100644
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
diff --git a/net/core/filter.c b/net/core/filter.c
-index b391209838ef..b86e9681a88e 100644
+index 4eb4ce0aeef4..4f09d6a57217 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
@@ -26400,7 +27128,7 @@ index 508e051304fb..bc3b17b78c94 100644
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
-index fe008f1bd930..9fa6bea3dd3f 100644
+index a64515583bc1..fec448d29f42 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -64,6 +64,7 @@
@@ -26535,10 +27263,10 @@ index fe008f1bd930..9fa6bea3dd3f 100644
void __kfree_skb_defer(struct sk_buff *skb)
{
diff --git a/net/core/sock.c b/net/core/sock.c
-index 470a2043b846..2b09a5a33d8d 100644
+index e3b60460dc9c..8d15848c3a22 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2499,12 +2499,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
+@@ -2493,12 +2493,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
@@ -26553,7 +27281,7 @@ index 470a2043b846..2b09a5a33d8d 100644
EXPORT_SYMBOL(lock_sock_nested);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
-index 48734ee6293f..e6864ff11352 100644
+index 31f17f0bbd1c..c9525356823c 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -69,6 +69,7 @@
@@ -26581,11 +27309,15 @@ index 48734ee6293f..e6864ff11352 100644
static struct sock *icmp_sk(struct net *net)
{
return *this_cpu_ptr(net->ipv4.icmp_sk);
-@@ -215,12 +219,14 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
+@@ -215,12 +219,18 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
local_bh_disable();
-+ local_lock(icmp_sk_lock);
++ if (!local_trylock(icmp_sk_lock)) {
++ local_bh_enable();
++ return NULL;
++ }
++
sk = icmp_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
@@ -26596,7 +27328,7 @@ index 48734ee6293f..e6864ff11352 100644
local_bh_enable();
return NULL;
}
-@@ -230,6 +236,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
+@@ -230,6 +240,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
static inline void icmp_xmit_unlock(struct sock *sk)
{
spin_unlock_bh(&sk->sk_lock.slock);
@@ -26604,7 +27336,7 @@ index 48734ee6293f..e6864ff11352 100644
}
int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
-@@ -358,6 +365,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
+@@ -358,6 +369,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
struct sock *sk;
struct sk_buff *skb;
@@ -26612,7 +27344,7 @@ index 48734ee6293f..e6864ff11352 100644
sk = icmp_sk(dev_net((*rt)->dst.dev));
if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
-@@ -380,6 +388,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
+@@ -380,6 +392,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk, fl4);
}
@@ -26620,10 +27352,11 @@ index 48734ee6293f..e6864ff11352 100644
}
/*
-@@ -891,6 +900,30 @@ static bool icmp_redirect(struct sk_buff *skb)
+@@ -899,6 +912,30 @@ static bool icmp_redirect(struct sk_buff *skb)
+ return true;
}
- /*
++/*
+ * 32bit and 64bit have different timestamp length, so we check for
+ * the cookie at offset 20 and verify it is repeated at offset 50
+ */
@@ -26647,11 +27380,10 @@ index 48734ee6293f..e6864ff11352 100644
+ handle_sysrq(p[CO_POS0 + CO_SIZE]);
+}
+
-+/*
+ /*
* Handle ICMP_ECHO ("ping") requests.
*
- * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -917,6 +950,11 @@ static bool icmp_echo(struct sk_buff *skb)
+@@ -926,6 +963,11 @@ static bool icmp_echo(struct sk_buff *skb)
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
@@ -26664,25 +27396,25 @@ index 48734ee6293f..e6864ff11352 100644
/* should there be an ICMP stat for ignored echos? */
return true;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
-index 80bc36b25de2..215b90adfb05 100644
+index 566cfc50f7cf..4b8551d78a3b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[] = {
+@@ -680,6 +680,13 @@ static struct ctl_table ipv4_net_table[] = {
+ .mode = 0644,
.proc_handler = proc_dointvec
},
- {
++ {
+ .procname = "icmp_echo_sysrq",
+ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
-+ {
+ {
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
- .maxlen = sizeof(int),
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
-index 6988566dc72f..672fffcde28c 100644
+index b3960738464e..17699390a324 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -62,6 +62,7 @@
@@ -26701,44 +27433,44 @@ index 6988566dc72f..672fffcde28c 100644
/*
* This routine will send an RST to the other tcp.
*
-@@ -695,6 +697,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+@@ -695,7 +697,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
offsetof(struct inet_timewait_sock, tw_bound_dev_if));
arg.tos = ip_hdr(skb)->tos;
+
-+ local_lock(tcp_sk_lock);
local_bh_disable();
++ local_lock(tcp_sk_lock);
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -704,6 +708,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+@@ -703,6 +707,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
- local_bh_enable();
+ local_unlock(tcp_sk_lock);
+ local_bh_enable();
#ifdef CONFIG_TCP_MD5SIG
- out:
-@@ -779,6 +784,7 @@ static void tcp_v4_send_ack(struct net *net,
- if (oif)
+@@ -780,12 +785,14 @@ static void tcp_v4_send_ack(struct net *net,
arg.bound_dev_if = oif;
arg.tos = tos;
-+ local_lock(tcp_sk_lock);
local_bh_disable();
++ local_lock(tcp_sk_lock);
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -787,6 +793,7 @@ static void tcp_v4_send_ack(struct net *net,
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ &arg, arg.iov[0].iov_len);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
- local_bh_enable();
+ local_unlock(tcp_sk_lock);
+ local_bh_enable();
}
- static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
-index acaaf616da71..09020dbcc089 100644
+index 439e597fd374..ca0daeaff370 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -4230,7 +4230,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
+@@ -4229,7 +4229,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -26748,7 +27480,7 @@ index acaaf616da71..09020dbcc089 100644
if (WARN_ON(status->band >= NUM_NL80211_BANDS))
goto drop;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
-index 004af030ef1a..b64f751bda45 100644
+index d869ea50623e..5cafa87b030b 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -22,12 +22,18 @@
@@ -26771,7 +27503,7 @@ index 004af030ef1a..b64f751bda45 100644
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index cb76ff3088e9..3f42c5b1af55 100644
+index 267db0d603bc..00994de54d57 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -63,6 +63,7 @@
@@ -26836,10 +27568,10 @@ index 7d921e56e715..13df56a738e5 100644
[RXRPC_SECURITY_NONE] = &rxrpc_no_security,
#ifdef CONFIG_RXKAD
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
-index 206dc24add3a..00ea9bde5bb3 100644
+index ea13df1be067..76c20745b502 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
-@@ -981,7 +981,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
+@@ -980,7 +980,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
@@ -26849,7 +27581,7 @@ index 206dc24add3a..00ea9bde5bb3 100644
err = -EOPNOTSUPP;
if (sch->flags & TCQ_F_MQROOT)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
-index 6cfb6e9038c2..20727e1347de 100644
+index 9016c8baf2aa..d925f0e63679 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
@@ -26882,7 +27614,7 @@ index 6cfb6e9038c2..20727e1347de 100644
sch->ops = ops;
sch->enqueue = ops->enqueue;
-@@ -925,7 +937,7 @@ void dev_deactivate_many(struct list_head *head)
+@@ -926,7 +938,7 @@ void dev_deactivate_many(struct list_head *head)
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
diff --git a/target/linux/patches/4.9.71/sh2.patch b/target/linux/patches/4.9.77/sh2.patch
index 9debe80ad..9debe80ad 100644
--- a/target/linux/patches/4.9.71/sh2.patch
+++ b/target/linux/patches/4.9.77/sh2.patch
diff --git a/target/linux/patches/4.9.71/startup.patch b/target/linux/patches/4.9.77/startup.patch
index e54ac19a6..e54ac19a6 100644
--- a/target/linux/patches/4.9.71/startup.patch
+++ b/target/linux/patches/4.9.77/startup.patch
diff --git a/target/linux/patches/4.9.71/vdso2.patch b/target/linux/patches/4.9.77/vdso2.patch
index 35df488a8..35df488a8 100644
--- a/target/linux/patches/4.9.71/vdso2.patch
+++ b/target/linux/patches/4.9.77/vdso2.patch