path: root/target/linux/patches/3.18.9
Diffstat (limited to 'target/linux/patches/3.18.9')
-rw-r--r--  target/linux/patches/3.18.9/bsd-compatibility.patch        2538
-rw-r--r--  target/linux/patches/3.18.9/cleankernel.patch                11
-rw-r--r--  target/linux/patches/3.18.9/cris-header.patch                50
-rw-r--r--  target/linux/patches/3.18.9/defaults.patch                   46
-rw-r--r--  target/linux/patches/3.18.9/export-symbol-for-exmap.patch    11
-rw-r--r--  target/linux/patches/3.18.9/gemalto.patch                    11
-rw-r--r--  target/linux/patches/3.18.9/initramfs-nosizelimit.patch      57
-rw-r--r--  target/linux/patches/3.18.9/lemote-rfkill.patch              21
-rw-r--r--  target/linux/patches/3.18.9/microblaze-ethernet.patch        11
-rw-r--r--  target/linux/patches/3.18.9/mkpiggy.patch                    28
-rw-r--r--  target/linux/patches/3.18.9/mtd-rootfs.patch                 26
-rw-r--r--  target/linux/patches/3.18.9/nfsv3-tcp.patch                  12
-rw-r--r--  target/linux/patches/3.18.9/non-static.patch                 33
-rw-r--r--  target/linux/patches/3.18.9/patch-fblogo                   2057
-rw-r--r--  target/linux/patches/3.18.9/patch-linuxrt                 23744
-rw-r--r--  target/linux/patches/3.18.9/patch-yaffs2                  16551
-rw-r--r--  target/linux/patches/3.18.9/ppc64-missing-zlib.patch         11
-rw-r--r--  target/linux/patches/3.18.9/regmap-bool.patch                27
-rw-r--r--  target/linux/patches/3.18.9/relocs.patch                   2709
-rw-r--r--  target/linux/patches/3.18.9/sgidefs.patch                    18
-rw-r--r--  target/linux/patches/3.18.9/sortext.patch                    33
-rw-r--r--  target/linux/patches/3.18.9/startup.patch                    37
-rw-r--r--  target/linux/patches/3.18.9/wlan-cf.patch                    11
-rw-r--r--  target/linux/patches/3.18.9/xargs.patch                      12
24 files changed, 48065 insertions, 0 deletions
diff --git a/target/linux/patches/3.18.9/bsd-compatibility.patch b/target/linux/patches/3.18.9/bsd-compatibility.patch
new file mode 100644
index 000000000..b954b658f
--- /dev/null
+++ b/target/linux/patches/3.18.9/bsd-compatibility.patch
@@ -0,0 +1,2538 @@
+diff -Nur linux-3.11.5.orig/scripts/Makefile.lib linux-3.11.5/scripts/Makefile.lib
+--- linux-3.11.5.orig/scripts/Makefile.lib 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/Makefile.lib 2013-10-16 18:09:31.000000000 +0200
+@@ -281,7 +281,12 @@
+ size_append = printf $(shell \
+ dec_size=0; \
+ for F in $1; do \
+- fsize=$$(stat -c "%s" $$F); \
++ if stat -qs .>/dev/null 2>&1; then \
++ statcmd='stat -f %z'; \
++ else \
++ statcmd='stat -c %s'; \
++ fi; \
++ fsize=$$($$statcmd $$F); \
+ dec_size=$$(expr $$dec_size + $$fsize); \
+ done; \
+ printf "%08x\n" $$dec_size | \
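The hunk above makes size_append portable across hosts: GNU coreutils stat(1) takes -c "%s" while BSD stat(1) takes -f %z, so the shell probe tries the BSD form first and falls back to the GNU form. For comparison, a minimal C sketch (hypothetical, not part of this patch) of the same size summation using the portable stat(2) system call, which needs no flag probing; it prints the total as eight hex digits like the first stage of size_append:

/* sizesum.c (hypothetical name): sum the sizes of the files named on
 * the command line and print the total as eight hex digits. */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	unsigned long long total = 0;
	struct stat st;
	int i;

	for (i = 1; i < argc; i++) {
		if (stat(argv[i], &st) != 0) {	/* portable, unlike stat(1) flags */
			perror(argv[i]);
			return 1;
		}
		total += (unsigned long long)st.st_size;
	}
	printf("%08llx\n", total);
	return 0;
}
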
+diff -Nur linux-3.11.5.orig/scripts/mod/mk_elfconfig.c linux-3.11.5/scripts/mod/mk_elfconfig.c
+--- linux-3.11.5.orig/scripts/mod/mk_elfconfig.c 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/mod/mk_elfconfig.c 2013-10-16 18:09:31.000000000 +0200
+@@ -1,7 +1,18 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+-#include <elf.h>
++
++#define EI_NIDENT (16)
++#define ELFMAG "\177ELF"
++
++#define SELFMAG 4
++#define EI_CLASS 4
++#define ELFCLASS32 1 /* 32-bit objects */
++#define ELFCLASS64 2 /* 64-bit objects */
++
++#define EI_DATA 5 /* Data encoding byte index */
++#define ELFDATA2LSB 1 /* 2's complement, little endian */
++#define ELFDATA2MSB 2 /* 2's complement, big endian */
+
+ int
+ main(int argc, char **argv)
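The hunk above removes mk_elfconfig's dependency on the host's <elf.h>, which BSD systems ship in a different location and shape, by defining only the constants the tool needs: the magic bytes plus the e_ident indices for class and data encoding. A minimal standalone sketch (hypothetical, not from this patch) showing how those macros classify an ELF object:

/* elfclassify.c (hypothetical): read the e_ident prefix of a file and
 * report its ELF class and byte order, using the same macros the hunk
 * above defines. */
#include <stdio.h>
#include <string.h>

#define EI_NIDENT  16
#define ELFMAG     "\177ELF"
#define SELFMAG    4
#define EI_CLASS   4
#define ELFCLASS64 2
#define EI_DATA    5
#define ELFDATA2MSB 2

int main(int argc, char **argv)
{
	unsigned char ident[EI_NIDENT];
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(ident, 1, EI_NIDENT, f) != EI_NIDENT ||
	    memcmp(ident, ELFMAG, SELFMAG) != 0) {	/* check "\177ELF" */
		fprintf(stderr, "%s: not an ELF file\n", argv[1]);
		fclose(f);
		return 1;
	}
	printf("%s-bit, %s-endian\n",
	       ident[EI_CLASS] == ELFCLASS64 ? "64" : "32",
	       ident[EI_DATA] == ELFDATA2MSB ? "big" : "little");
	fclose(f);
	return 0;
}
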
+diff -Nur linux-3.11.5.orig/scripts/mod/modpost.h linux-3.11.5/scripts/mod/modpost.h
+--- linux-3.11.5.orig/scripts/mod/modpost.h 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/mod/modpost.h 2013-10-16 18:09:31.000000000 +0200
+@@ -7,7 +7,2453 @@
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+-#include <elf.h>
++
++
++/* This file defines standard ELF types, structures, and macros.
++ Copyright (C) 1995-1999,2000,2001,2002,2003 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, write to the Free
++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307 USA. */
++
++#ifndef _ELF_H
++#define _ELF_H 1
++
++__BEGIN_DECLS
++
++/* Standard ELF types. */
++
++#include <stdint.h>
++
++/* Type for a 16-bit quantity. */
++typedef uint16_t Elf32_Half;
++typedef uint16_t Elf64_Half;
++
++/* Types for signed and unsigned 32-bit quantities. */
++typedef uint32_t Elf32_Word;
++typedef int32_t Elf32_Sword;
++typedef uint32_t Elf64_Word;
++typedef int32_t Elf64_Sword;
++
++/* Types for signed and unsigned 64-bit quantities. */
++typedef uint64_t Elf32_Xword;
++typedef int64_t Elf32_Sxword;
++typedef uint64_t Elf64_Xword;
++typedef int64_t Elf64_Sxword;
++
++/* Type of addresses. */
++typedef uint32_t Elf32_Addr;
++typedef uint64_t Elf64_Addr;
++
++/* Type of file offsets. */
++typedef uint32_t Elf32_Off;
++typedef uint64_t Elf64_Off;
++
++/* Type for section indices, which are 16-bit quantities. */
++typedef uint16_t Elf32_Section;
++typedef uint16_t Elf64_Section;
++
++/* Type for version symbol information. */
++typedef Elf32_Half Elf32_Versym;
++typedef Elf64_Half Elf64_Versym;
++
++
++/* The ELF file header. This appears at the start of every ELF file. */
++
++#define EI_NIDENT (16)
++
++typedef struct
++{
++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
++ Elf32_Half e_type; /* Object file type */
++ Elf32_Half e_machine; /* Architecture */
++ Elf32_Word e_version; /* Object file version */
++ Elf32_Addr e_entry; /* Entry point virtual address */
++ Elf32_Off e_phoff; /* Program header table file offset */
++ Elf32_Off e_shoff; /* Section header table file offset */
++ Elf32_Word e_flags; /* Processor-specific flags */
++ Elf32_Half e_ehsize; /* ELF header size in bytes */
++ Elf32_Half e_phentsize; /* Program header table entry size */
++ Elf32_Half e_phnum; /* Program header table entry count */
++ Elf32_Half e_shentsize; /* Section header table entry size */
++ Elf32_Half e_shnum; /* Section header table entry count */
++ Elf32_Half e_shstrndx; /* Section header string table index */
++} Elf32_Ehdr;
++
++typedef struct
++{
++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
++ Elf64_Half e_type; /* Object file type */
++ Elf64_Half e_machine; /* Architecture */
++ Elf64_Word e_version; /* Object file version */
++ Elf64_Addr e_entry; /* Entry point virtual address */
++ Elf64_Off e_phoff; /* Program header table file offset */
++ Elf64_Off e_shoff; /* Section header table file offset */
++ Elf64_Word e_flags; /* Processor-specific flags */
++ Elf64_Half e_ehsize; /* ELF header size in bytes */
++ Elf64_Half e_phentsize; /* Program header table entry size */
++ Elf64_Half e_phnum; /* Program header table entry count */
++ Elf64_Half e_shentsize; /* Section header table entry size */
++ Elf64_Half e_shnum; /* Section header table entry count */
++ Elf64_Half e_shstrndx; /* Section header string table index */
++} Elf64_Ehdr;
++
++/* Fields in the e_ident array. The EI_* macros are indices into the
++ array. The macros under each EI_* macro are the values the byte
++ may have. */
++
++#define EI_MAG0 0 /* File identification byte 0 index */
++#define ELFMAG0 0x7f /* Magic number byte 0 */
++
++#define EI_MAG1 1 /* File identification byte 1 index */
++#define ELFMAG1 'E' /* Magic number byte 1 */
++
++#define EI_MAG2 2 /* File identification byte 2 index */
++#define ELFMAG2 'L' /* Magic number byte 2 */
++
++#define EI_MAG3 3 /* File identification byte 3 index */
++#define ELFMAG3 'F' /* Magic number byte 3 */
++
++/* Conglomeration of the identification bytes, for easy testing as a word. */
++#define ELFMAG "\177ELF"
++#define SELFMAG 4
++
++#define EI_CLASS 4 /* File class byte index */
++#define ELFCLASSNONE 0 /* Invalid class */
++#define ELFCLASS32 1 /* 32-bit objects */
++#define ELFCLASS64 2 /* 64-bit objects */
++#define ELFCLASSNUM 3
++
++#define EI_DATA 5 /* Data encoding byte index */
++#define ELFDATANONE 0 /* Invalid data encoding */
++#define ELFDATA2LSB 1 /* 2's complement, little endian */
++#define ELFDATA2MSB 2 /* 2's complement, big endian */
++#define ELFDATANUM 3
++
++#define EI_VERSION 6 /* File version byte index */
++ /* Value must be EV_CURRENT */
++
++#define EI_OSABI 7 /* OS ABI identification */
++#define ELFOSABI_NONE 0 /* UNIX System V ABI */
++#define ELFOSABI_SYSV 0 /* Alias. */
++#define ELFOSABI_HPUX 1 /* HP-UX */
++#define ELFOSABI_NETBSD 2 /* NetBSD. */
++#define ELFOSABI_LINUX 3 /* Linux. */
++#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */
++#define ELFOSABI_AIX 7 /* IBM AIX. */
++#define ELFOSABI_IRIX 8 /* SGI Irix. */
++#define ELFOSABI_FREEBSD 9 /* FreeBSD. */
++#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */
++#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
++#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
++#define ELFOSABI_ARM 97 /* ARM */
++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
++
++#define EI_ABIVERSION 8 /* ABI version */
++
++#define EI_PAD 9 /* Byte index of padding bytes */
++
++/* Legal values for e_type (object file type). */
++
++#define ET_NONE 0 /* No file type */
++#define ET_REL 1 /* Relocatable file */
++#define ET_EXEC 2 /* Executable file */
++#define ET_DYN 3 /* Shared object file */
++#define ET_CORE 4 /* Core file */
++#define ET_NUM 5 /* Number of defined types */
++#define ET_LOOS 0xfe00 /* OS-specific range start */
++#define ET_HIOS 0xfeff /* OS-specific range end */
++#define ET_LOPROC 0xff00 /* Processor-specific range start */
++#define ET_HIPROC 0xffff /* Processor-specific range end */
++
++/* Legal values for e_machine (architecture). */
++
++#define EM_NONE 0 /* No machine */
++#define EM_M32 1 /* AT&T WE 32100 */
++#define EM_SPARC 2 /* SUN SPARC */
++#define EM_386 3 /* Intel 80386 */
++#define EM_68K 4 /* Motorola m68k family */
++#define EM_88K 5 /* Motorola m88k family */
++#define EM_860 7 /* Intel 80860 */
++#define EM_MIPS 8 /* MIPS R3000 big-endian */
++#define EM_S370 9 /* IBM System/370 */
++#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */
++
++#define EM_PARISC 15 /* HPPA */
++#define EM_VPP500 17 /* Fujitsu VPP500 */
++#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
++#define EM_960 19 /* Intel 80960 */
++#define EM_PPC 20 /* PowerPC */
++#define EM_PPC64 21 /* PowerPC 64-bit */
++#define EM_S390 22 /* IBM S390 */
++
++#define EM_V800 36 /* NEC V800 series */
++#define EM_FR20 37 /* Fujitsu FR20 */
++#define EM_RH32 38 /* TRW RH-32 */
++#define EM_RCE 39 /* Motorola RCE */
++#define EM_ARM 40 /* ARM */
++#define EM_FAKE_ALPHA 41 /* Digital Alpha */
++#define EM_SH 42 /* Hitachi SH */
++#define EM_SPARCV9 43 /* SPARC v9 64-bit */
++#define EM_TRICORE 44 /* Siemens Tricore */
++#define EM_ARC 45 /* Argonaut RISC Core */
++#define EM_H8_300 46 /* Hitachi H8/300 */
++#define EM_H8_300H 47 /* Hitachi H8/300H */
++#define EM_H8S 48 /* Hitachi H8S */
++#define EM_H8_500 49 /* Hitachi H8/500 */
++#define EM_IA_64 50 /* Intel Merced */
++#define EM_MIPS_X 51 /* Stanford MIPS-X */
++#define EM_COLDFIRE 52 /* Motorola Coldfire */
++#define EM_68HC12 53 /* Motorola M68HC12 */
++#define EM_MMA 54 /* Fujitsu MMA Multimedia Accelerator*/
++#define EM_PCP 55 /* Siemens PCP */
++#define EM_NCPU 56 /* Sony nCPU embedded RISC */
++#define EM_NDR1 57 /* Denso NDR1 microprocessor */
++#define EM_STARCORE 58 /* Motorola Start*Core processor */
++#define EM_ME16 59 /* Toyota ME16 processor */
++#define EM_ST100 60 /* STMicroelectronic ST100 processor */
++#define EM_TINYJ 61 /* Advanced Logic Corp. Tinyj emb.fam*/
++#define EM_X86_64 62 /* AMD x86-64 architecture */
++#define EM_PDSP 63 /* Sony DSP Processor */
++
++#define EM_FX66 66 /* Siemens FX66 microcontroller */
++#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16 mc */
++#define EM_ST7 68 /* STmicroelectronics ST7 8 bit mc */
++#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller */
++#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller */
++#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller */
++#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller */
++#define EM_SVX 73 /* Silicon Graphics SVx */
++#define EM_ST19 74 /* STMicroelectronics ST19 8 bit mc */
++#define EM_VAX 75 /* Digital VAX */
++#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
++#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded processor */
++#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor */
++#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor */
++#define EM_MMIX 80 /* Donald Knuth's educational 64-bit processor */
++#define EM_HUANY 81 /* Harvard University machine-independent object files */
++#define EM_PRISM 82 /* SiTera Prism */
++#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller */
++#define EM_FR30 84 /* Fujitsu FR30 */
++#define EM_D10V 85 /* Mitsubishi D10V */
++#define EM_D30V 86 /* Mitsubishi D30V */
++#define EM_V850 87 /* NEC v850 */
++#define EM_M32R 88 /* Mitsubishi M32R */
++#define EM_MN10300 89 /* Matsushita MN10300 */
++#define EM_MN10200 90 /* Matsushita MN10200 */
++#define EM_PJ 91 /* picoJava */
++#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
++#define EM_ARC_A5 93 /* ARC Cores Tangent-A5 */
++#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */
++#define EM_NUM 95
++
++/* If it is necessary to assign new unofficial EM_* values, please
++ pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the
++ chances of collision with official or non-GNU unofficial values. */
++
++#define EM_ALPHA 0x9026
++
++/* Legal values for e_version (version). */
++
++#define EV_NONE 0 /* Invalid ELF version */
++#define EV_CURRENT 1 /* Current version */
++#define EV_NUM 2
++
++/* Section header. */
++
++typedef struct
++{
++ Elf32_Word sh_name; /* Section name (string tbl index) */
++ Elf32_Word sh_type; /* Section type */
++ Elf32_Word sh_flags; /* Section flags */
++ Elf32_Addr sh_addr; /* Section virtual addr at execution */
++ Elf32_Off sh_offset; /* Section file offset */
++ Elf32_Word sh_size; /* Section size in bytes */
++ Elf32_Word sh_link; /* Link to another section */
++ Elf32_Word sh_info; /* Additional section information */
++ Elf32_Word sh_addralign; /* Section alignment */
++ Elf32_Word sh_entsize; /* Entry size if section holds table */
++} Elf32_Shdr;
++
++typedef struct
++{
++ Elf64_Word sh_name; /* Section name (string tbl index) */
++ Elf64_Word sh_type; /* Section type */
++ Elf64_Xword sh_flags; /* Section flags */
++ Elf64_Addr sh_addr; /* Section virtual addr at execution */
++ Elf64_Off sh_offset; /* Section file offset */
++ Elf64_Xword sh_size; /* Section size in bytes */
++ Elf64_Word sh_link; /* Link to another section */
++ Elf64_Word sh_info; /* Additional section information */
++ Elf64_Xword sh_addralign; /* Section alignment */
++ Elf64_Xword sh_entsize; /* Entry size if section holds table */
++} Elf64_Shdr;
++
++/* Special section indices. */
++
++#define SHN_UNDEF 0 /* Undefined section */
++#define SHN_LORESERVE 0xff00 /* Start of reserved indices */
++#define SHN_LOPROC 0xff00 /* Start of processor-specific */
++#define SHN_HIPROC 0xff1f /* End of processor-specific */
++#define SHN_LOOS 0xff20 /* Start of OS-specific */
++#define SHN_HIOS 0xff3f /* End of OS-specific */
++#define SHN_ABS 0xfff1 /* Associated symbol is absolute */
++#define SHN_COMMON 0xfff2 /* Associated symbol is common */
++#define SHN_XINDEX 0xffff /* Index is in extra table. */
++#define SHN_HIRESERVE 0xffff /* End of reserved indices */
++
++/* Legal values for sh_type (section type). */
++
++#define SHT_NULL 0 /* Section header table entry unused */
++#define SHT_PROGBITS 1 /* Program data */
++#define SHT_SYMTAB 2 /* Symbol table */
++#define SHT_STRTAB 3 /* String table */
++#define SHT_RELA 4 /* Relocation entries with addends */
++#define SHT_HASH 5 /* Symbol hash table */
++#define SHT_DYNAMIC 6 /* Dynamic linking information */
++#define SHT_NOTE 7 /* Notes */
++#define SHT_NOBITS 8 /* Program space with no data (bss) */
++#define SHT_REL 9 /* Relocation entries, no addends */
++#define SHT_SHLIB 10 /* Reserved */
++#define SHT_DYNSYM 11 /* Dynamic linker symbol table */
++#define SHT_INIT_ARRAY 14 /* Array of constructors */
++#define SHT_FINI_ARRAY 15 /* Array of destructors */
++#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */
++#define SHT_GROUP 17 /* Section group */
++#define SHT_SYMTAB_SHNDX 18 /* Extended section indices */
++#define SHT_NUM 19 /* Number of defined types. */
++#define SHT_LOOS 0x60000000 /* Start OS-specific */
++#define SHT_GNU_LIBLIST 0x6ffffff7 /* Prelink library list */
++#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */
++#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */
++#define SHT_SUNW_move 0x6ffffffa
++#define SHT_SUNW_COMDAT 0x6ffffffb
++#define SHT_SUNW_syminfo 0x6ffffffc
++#define SHT_GNU_verdef 0x6ffffffd /* Version definition section. */
++#define SHT_GNU_verneed 0x6ffffffe /* Version needs section. */
++#define SHT_GNU_versym 0x6fffffff /* Version symbol table. */
++#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */
++#define SHT_HIOS 0x6fffffff /* End OS-specific type */
++#define SHT_LOPROC 0x70000000 /* Start of processor-specific */
++#define SHT_HIPROC 0x7fffffff /* End of processor-specific */
++#define SHT_LOUSER 0x80000000 /* Start of application-specific */
++#define SHT_HIUSER 0x8fffffff /* End of application-specific */
++
++/* Legal values for sh_flags (section flags). */
++
++#define SHF_WRITE (1 << 0) /* Writable */
++#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */
++#define SHF_EXECINSTR (1 << 2) /* Executable */
++#define SHF_MERGE (1 << 4) /* Might be merged */
++#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */
++#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */
++#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */
++#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling
++ required */
++#define SHF_GROUP (1 << 9) /* Section is member of a group. */
++#define SHF_TLS (1 << 10) /* Section holds thread-local data. */
++#define SHF_MASKOS 0x0ff00000 /* OS-specific. */
++#define SHF_MASKPROC 0xf0000000 /* Processor-specific */
++
++/* Section group handling. */
++#define GRP_COMDAT 0x1 /* Mark group as COMDAT. */
++
++/* Symbol table entry. */
++
++typedef struct
++{
++ Elf32_Word st_name; /* Symbol name (string tbl index) */
++ Elf32_Addr st_value; /* Symbol value */
++ Elf32_Word st_size; /* Symbol size */
++ unsigned char st_info; /* Symbol type and binding */
++ unsigned char st_other; /* Symbol visibility */
++ Elf32_Section st_shndx; /* Section index */
++} Elf32_Sym;
++
++typedef struct
++{
++ Elf64_Word st_name; /* Symbol name (string tbl index) */
++ unsigned char st_info; /* Symbol type and binding */
++ unsigned char st_other; /* Symbol visibility */
++ Elf64_Section st_shndx; /* Section index */
++ Elf64_Addr st_value; /* Symbol value */
++ Elf64_Xword st_size; /* Symbol size */
++} Elf64_Sym;
++
++/* The syminfo section if available contains additional information about
++ every dynamic symbol. */
++
++typedef struct
++{
++ Elf32_Half si_boundto; /* Direct bindings, symbol bound to */
++ Elf32_Half si_flags; /* Per symbol flags */
++} Elf32_Syminfo;
++
++typedef struct
++{
++ Elf64_Half si_boundto; /* Direct bindings, symbol bound to */
++ Elf64_Half si_flags; /* Per symbol flags */
++} Elf64_Syminfo;
++
++/* Possible values for si_boundto. */
++#define SYMINFO_BT_SELF 0xffff /* Symbol bound to self */
++#define SYMINFO_BT_PARENT 0xfffe /* Symbol bound to parent */
++#define SYMINFO_BT_LOWRESERVE 0xff00 /* Beginning of reserved entries */
++
++/* Possible bitmasks for si_flags. */
++#define SYMINFO_FLG_DIRECT 0x0001 /* Direct bound symbol */
++#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-thru symbol for translator */
++#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */
++#define SYMINFO_FLG_LAZYLOAD 0x0008 /* Symbol bound to object to be lazy
++ loaded */
++/* Syminfo version values. */
++#define SYMINFO_NONE 0
++#define SYMINFO_CURRENT 1
++#define SYMINFO_NUM 2
++
++
++/* How to extract and insert information held in the st_info field. */
++
++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4)
++#define ELF32_ST_TYPE(val) ((val) & 0xf)
++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
++
++/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field. */
++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val)
++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val)
++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type))
++
++/* Legal values for ST_BIND subfield of st_info (symbol binding). */
++
++#define STB_LOCAL 0 /* Local symbol */
++#define STB_GLOBAL 1 /* Global symbol */
++#define STB_WEAK 2 /* Weak symbol */
++#define STB_NUM 3 /* Number of defined types. */
++#define STB_LOOS 10 /* Start of OS-specific */
++#define STB_HIOS 12 /* End of OS-specific */
++#define STB_LOPROC 13 /* Start of processor-specific */
++#define STB_HIPROC 15 /* End of processor-specific */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type). */
++
++#define STT_NOTYPE 0 /* Symbol type is unspecified */
++#define STT_OBJECT 1 /* Symbol is a data object */
++#define STT_FUNC 2 /* Symbol is a code object */
++#define STT_SECTION 3 /* Symbol associated with a section */
++#define STT_FILE 4 /* Symbol's name is file name */
++#define STT_COMMON 5 /* Symbol is a common data object */
++#define STT_TLS 6 /* Symbol is thread-local data object*/
++#define STT_NUM 7 /* Number of defined types. */
++#define STT_LOOS 10 /* Start of OS-specific */
++#define STT_HIOS 12 /* End of OS-specific */
++#define STT_LOPROC 13 /* Start of processor-specific */
++#define STT_HIPROC 15 /* End of processor-specific */
++
++
++/* Symbol table indices are found in the hash buckets and chain table
++ of a symbol hash table section. This special index value indicates
++ the end of a chain, meaning no further symbols are found in that bucket. */
++
++#define STN_UNDEF 0 /* End of a chain. */
++
++
++/* How to extract and insert information held in the st_other field. */
++
++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
++
++/* For ELF64 the definitions are the same. */
++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
++
++/* Symbol visibility specification encoded in the st_other field. */
++#define STV_DEFAULT 0 /* Default symbol visibility rules */
++#define STV_INTERNAL 1 /* Processor specific hidden class */
++#define STV_HIDDEN 2 /* Sym unavailable in other modules */
++#define STV_PROTECTED 3 /* Not preemptible, not exported */
++
++
++/* Relocation table entry without addend (in section of type SHT_REL). */
++
++typedef struct
++{
++ Elf32_Addr r_offset; /* Address */
++ Elf32_Word r_info; /* Relocation type and symbol index */
++} Elf32_Rel;
++
++/* I have seen two different definitions of the Elf64_Rel and
++ Elf64_Rela structures, so we'll leave them out until Novell (or
++ whoever) gets their act together. */
++/* The following, at least, is used on Sparc v9, MIPS, and Alpha. */
++
++typedef struct
++{
++ Elf64_Addr r_offset; /* Address */
++ Elf64_Xword r_info; /* Relocation type and symbol index */
++} Elf64_Rel;
++
++/* Relocation table entry with addend (in section of type SHT_RELA). */
++
++typedef struct
++{
++ Elf32_Addr r_offset; /* Address */
++ Elf32_Word r_info; /* Relocation type and symbol index */
++ Elf32_Sword r_addend; /* Addend */
++} Elf32_Rela;
++
++typedef struct
++{
++ Elf64_Addr r_offset; /* Address */
++ Elf64_Xword r_info; /* Relocation type and symbol index */
++ Elf64_Sxword r_addend; /* Addend */
++} Elf64_Rela;
++
++/* How to extract and insert information held in the r_info field. */
++
++#define ELF32_R_SYM(val) ((val) >> 8)
++#define ELF32_R_TYPE(val) ((val) & 0xff)
++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff))
++
++#define ELF64_R_SYM(i) ((i) >> 32)
++#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type))
++
++/* Program segment header. */
++
++typedef struct
++{
++ Elf32_Word p_type; /* Segment type */
++ Elf32_Off p_offset; /* Segment file offset */
++ Elf32_Addr p_vaddr; /* Segment virtual address */
++ Elf32_Addr p_paddr; /* Segment physical address */
++ Elf32_Word p_filesz; /* Segment size in file */
++ Elf32_Word p_memsz; /* Segment size in memory */
++ Elf32_Word p_flags; /* Segment flags */
++ Elf32_Word p_align; /* Segment alignment */
++} Elf32_Phdr;
++
++typedef struct
++{
++ Elf64_Word p_type; /* Segment type */
++ Elf64_Word p_flags; /* Segment flags */
++ Elf64_Off p_offset; /* Segment file offset */
++ Elf64_Addr p_vaddr; /* Segment virtual address */
++ Elf64_Addr p_paddr; /* Segment physical address */
++ Elf64_Xword p_filesz; /* Segment size in file */
++ Elf64_Xword p_memsz; /* Segment size in memory */
++ Elf64_Xword p_align; /* Segment alignment */
++} Elf64_Phdr;
++
++/* Legal values for p_type (segment type). */
++
++#define PT_NULL 0 /* Program header table entry unused */
++#define PT_LOAD 1 /* Loadable program segment */
++#define PT_DYNAMIC 2 /* Dynamic linking information */
++#define PT_INTERP 3 /* Program interpreter */
++#define PT_NOTE 4 /* Auxiliary information */
++#define PT_SHLIB 5 /* Reserved */
++#define PT_PHDR 6 /* Entry for header table itself */
++#define PT_TLS 7 /* Thread-local storage segment */
++#define PT_NUM 8 /* Number of defined types */
++#define PT_LOOS 0x60000000 /* Start of OS-specific */
++#define PT_GNU_EH_FRAME 0x6474e550 /* GCC .eh_frame_hdr segment */
++#define PT_GNU_STACK 0x6474e551 /* Indicates stack executability */
++#define PT_LOSUNW 0x6ffffffa
++#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
++#define PT_SUNWSTACK 0x6ffffffb /* Stack segment */
++#define PT_HISUNW 0x6fffffff
++#define PT_HIOS 0x6fffffff /* End of OS-specific */
++#define PT_LOPROC 0x70000000 /* Start of processor-specific */
++#define PT_HIPROC 0x7fffffff /* End of processor-specific */
++
++/* Legal values for p_flags (segment flags). */
++
++#define PF_X (1 << 0) /* Segment is executable */
++#define PF_W (1 << 1) /* Segment is writable */
++#define PF_R (1 << 2) /* Segment is readable */
++#define PF_MASKOS 0x0ff00000 /* OS-specific */
++#define PF_MASKPROC 0xf0000000 /* Processor-specific */
++
++/* Legal values for note segment descriptor types for core files. */
++
++#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */
++#define NT_FPREGSET 2 /* Contains copy of fpregset struct */
++#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */
++#define NT_PRXREG 4 /* Contains copy of prxregset struct */
++#define NT_TASKSTRUCT 4 /* Contains copy of task structure */
++#define NT_PLATFORM 5 /* String from sysinfo(SI_PLATFORM) */
++#define NT_AUXV 6 /* Contains copy of auxv array */
++#define NT_GWINDOWS 7 /* Contains copy of gwindows struct */
++#define NT_ASRS 8 /* Contains copy of asrset struct */
++#define NT_PSTATUS 10 /* Contains copy of pstatus struct */
++#define NT_PSINFO 13 /* Contains copy of psinfo struct */
++#define NT_PRCRED 14 /* Contains copy of prcred struct */
++#define NT_UTSNAME 15 /* Contains copy of utsname struct */
++#define NT_LWPSTATUS 16 /* Contains copy of lwpstatus struct */
++#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */
++#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct*/
++
++/* Legal values for the note segment descriptor types for object files. */
++
++#define NT_VERSION 1 /* Contains a version string. */
++
++
++/* Dynamic section entry. */
++
++typedef struct
++{
++ Elf32_Sword d_tag; /* Dynamic entry type */
++ union
++ {
++ Elf32_Word d_val; /* Integer value */
++ Elf32_Addr d_ptr; /* Address value */
++ } d_un;
++} Elf32_Dyn;
++
++typedef struct
++{
++ Elf64_Sxword d_tag; /* Dynamic entry type */
++ union
++ {
++ Elf64_Xword d_val; /* Integer value */
++ Elf64_Addr d_ptr; /* Address value */
++ } d_un;
++} Elf64_Dyn;
++
++/* Legal values for d_tag (dynamic entry type). */
++
++#define DT_NULL 0 /* Marks end of dynamic section */
++#define DT_NEEDED 1 /* Name of needed library */
++#define DT_PLTRELSZ 2 /* Size in bytes of PLT relocs */
++#define DT_PLTGOT 3 /* Processor defined value */
++#define DT_HASH 4 /* Address of symbol hash table */
++#define DT_STRTAB 5 /* Address of string table */
++#define DT_SYMTAB 6 /* Address of symbol table */
++#define DT_RELA 7 /* Address of Rela relocs */
++#define DT_RELASZ 8 /* Total size of Rela relocs */
++#define DT_RELAENT 9 /* Size of one Rela reloc */
++#define DT_STRSZ 10 /* Size of string table */
++#define DT_SYMENT 11 /* Size of one symbol table entry */
++#define DT_INIT 12 /* Address of init function */
++#define DT_FINI 13 /* Address of termination function */
++#define DT_SONAME 14 /* Name of shared object */
++#define DT_RPATH 15 /* Library search path (deprecated) */
++#define DT_SYMBOLIC 16 /* Start symbol search here */
++#define DT_REL 17 /* Address of Rel relocs */
++#define DT_RELSZ 18 /* Total size of Rel relocs */
++#define DT_RELENT 19 /* Size of one Rel reloc */
++#define DT_PLTREL 20 /* Type of reloc in PLT */
++#define DT_DEBUG 21 /* For debugging; unspecified */
++#define DT_TEXTREL 22 /* Reloc might modify .text */
++#define DT_JMPREL 23 /* Address of PLT relocs */
++#define DT_BIND_NOW 24 /* Process relocations of object */
++#define DT_INIT_ARRAY 25 /* Array with addresses of init fct */
++#define DT_FINI_ARRAY 26 /* Array with addresses of fini fct */
++#define DT_INIT_ARRAYSZ 27 /* Size in bytes of DT_INIT_ARRAY */
++#define DT_FINI_ARRAYSZ 28 /* Size in bytes of DT_FINI_ARRAY */
++#define DT_RUNPATH 29 /* Library search path */
++#define DT_FLAGS 30 /* Flags for the object being loaded */
++#define DT_ENCODING 32 /* Start of encoded range */
++#define DT_PREINIT_ARRAY 32 /* Array with addresses of preinit fct*/
++#define DT_PREINIT_ARRAYSZ 33 /* size in bytes of DT_PREINIT_ARRAY */
++#define DT_NUM 34 /* Number used */
++#define DT_LOOS 0x6000000d /* Start of OS-specific */
++#define DT_HIOS 0x6ffff000 /* End of OS-specific */
++#define DT_LOPROC 0x70000000 /* Start of processor-specific */
++#define DT_HIPROC 0x7fffffff /* End of processor-specific */
++#define DT_PROCNUM DT_MIPS_NUM /* Most used by any processor */
++
++/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
++ Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
++ approach. */
++#define DT_VALRNGLO 0x6ffffd00
++#define DT_GNU_PRELINKED 0x6ffffdf5 /* Prelinking timestamp */
++#define DT_GNU_CONFLICTSZ 0x6ffffdf6 /* Size of conflict section */
++#define DT_GNU_LIBLISTSZ 0x6ffffdf7 /* Size of library list */
++#define DT_CHECKSUM 0x6ffffdf8
++#define DT_PLTPADSZ 0x6ffffdf9
++#define DT_MOVEENT 0x6ffffdfa
++#define DT_MOVESZ 0x6ffffdfb
++#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */
++#define DT_POSFLAG_1 0x6ffffdfd /* Flags for DT_* entries, affecting
++ the following DT_* entry. */
++#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */
++#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */
++#define DT_VALRNGHI 0x6ffffdff
++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag)) /* Reverse order! */
++#define DT_VALNUM 12
++
++/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
++ Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
++
++ If any adjustment is made to the ELF object after it has been
++ built these entries will need to be adjusted. */
++#define DT_ADDRRNGLO 0x6ffffe00
++#define DT_GNU_CONFLICT 0x6ffffef8 /* Start of conflict section */
++#define DT_GNU_LIBLIST 0x6ffffef9 /* Library list */
++#define DT_CONFIG 0x6ffffefa /* Configuration information. */
++#define DT_DEPAUDIT 0x6ffffefb /* Dependency auditing. */
++#define DT_AUDIT 0x6ffffefc /* Object auditing. */
++#define DT_PLTPAD 0x6ffffefd /* PLT padding. */
++#define DT_MOVETAB 0x6ffffefe /* Move table. */
++#define DT_SYMINFO 0x6ffffeff /* Syminfo table. */
++#define DT_ADDRRNGHI 0x6ffffeff
++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */
++#define DT_ADDRNUM 10
++
++/* The versioning entry types. The next are defined as part of the
++ GNU extension. */
++#define DT_VERSYM 0x6ffffff0
++
++#define DT_RELACOUNT 0x6ffffff9
++#define DT_RELCOUNT 0x6ffffffa
++
++/* These were chosen by Sun. */
++#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */
++#define DT_VERDEF 0x6ffffffc /* Address of version definition
++ table */
++#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */
++#define DT_VERNEED 0x6ffffffe /* Address of table with needed
++ versions */
++#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */
++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */
++#define DT_VERSIONTAGNUM 16
++
++/* Sun added these machine-independent extensions in the "processor-specific"
++ range. Be compatible. */
++#define DT_AUXILIARY 0x7ffffffd /* Shared object to load before self */
++#define DT_FILTER 0x7fffffff /* Shared object to get values from */
++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1)
++#define DT_EXTRANUM 3
++
++/* Values of `d_un.d_val' in the DT_FLAGS entry. */
++#define DF_ORIGIN 0x00000001 /* Object may use DF_ORIGIN */
++#define DF_SYMBOLIC 0x00000002 /* Symbol resolutions starts here */
++#define DF_TEXTREL 0x00000004 /* Object contains text relocations */
++#define DF_BIND_NOW 0x00000008 /* No lazy binding for this object */
++#define DF_STATIC_TLS 0x00000010 /* Module uses the static TLS model */
++
++/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1
++ entry in the dynamic section. */
++#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */
++#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */
++#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */
++#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/
++#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/
++#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/
++#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */
++#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */
++#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */
++#define DF_1_TRANS 0x00000200
++#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */
++#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */
++#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */
++#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/
++#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */
++#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */
++#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */
++
++/* Flags for the feature selection in DT_FEATURE_1. */
++#define DTF_1_PARINIT 0x00000001
++#define DTF_1_CONFEXP 0x00000002
++
++/* Flags in the DT_POSFLAG_1 entry affecting only the next DT_* entry. */
++#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */
++#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not
++ generally available. */
++
++/* Version definition sections. */
++
++typedef struct
++{
++ Elf32_Half vd_version; /* Version revision */
++ Elf32_Half vd_flags; /* Version information */
++ Elf32_Half vd_ndx; /* Version Index */
++ Elf32_Half vd_cnt; /* Number of associated aux entries */
++ Elf32_Word vd_hash; /* Version name hash value */
++ Elf32_Word vd_aux; /* Offset in bytes to verdaux array */
++ Elf32_Word vd_next; /* Offset in bytes to next verdef
++ entry */
++} Elf32_Verdef;
++
++typedef struct
++{
++ Elf64_Half vd_version; /* Version revision */
++ Elf64_Half vd_flags; /* Version information */
++ Elf64_Half vd_ndx; /* Version Index */
++ Elf64_Half vd_cnt; /* Number of associated aux entries */
++ Elf64_Word vd_hash; /* Version name hash value */
++ Elf64_Word vd_aux; /* Offset in bytes to verdaux array */
++ Elf64_Word vd_next; /* Offset in bytes to next verdef
++ entry */
++} Elf64_Verdef;
++
++
++/* Legal values for vd_version (version revision). */
++#define VER_DEF_NONE 0 /* No version */
++#define VER_DEF_CURRENT 1 /* Current version */
++#define VER_DEF_NUM 2 /* Given version number */
++
++/* Legal values for vd_flags (version information flags). */
++#define VER_FLG_BASE 0x1 /* Version definition of file itself */
++#define VER_FLG_WEAK 0x2 /* Weak version identifier */
++
++/* Versym symbol index values. */
++#define VER_NDX_LOCAL 0 /* Symbol is local. */
++#define VER_NDX_GLOBAL 1 /* Symbol is global. */
++#define VER_NDX_LORESERVE 0xff00 /* Beginning of reserved entries. */
++#define VER_NDX_ELIMINATE 0xff01 /* Symbol is to be eliminated. */
++
++/* Auxiliary version information. */
++
++typedef struct
++{
++ Elf32_Word vda_name; /* Version or dependency names */
++ Elf32_Word vda_next; /* Offset in bytes to next verdaux
++ entry */
++} Elf32_Verdaux;
++
++typedef struct
++{
++ Elf64_Word vda_name; /* Version or dependency names */
++ Elf64_Word vda_next; /* Offset in bytes to next verdaux
++ entry */
++} Elf64_Verdaux;
++
++
++/* Version dependency section. */
++
++typedef struct
++{
++ Elf32_Half vn_version; /* Version of structure */
++ Elf32_Half vn_cnt; /* Number of associated aux entries */
++ Elf32_Word vn_file; /* Offset of filename for this
++ dependency */
++ Elf32_Word vn_aux; /* Offset in bytes to vernaux array */
++ Elf32_Word vn_next; /* Offset in bytes to next verneed
++ entry */
++} Elf32_Verneed;
++
++typedef struct
++{
++ Elf64_Half vn_version; /* Version of structure */
++ Elf64_Half vn_cnt; /* Number of associated aux entries */
++ Elf64_Word vn_file; /* Offset of filename for this
++ dependency */
++ Elf64_Word vn_aux; /* Offset in bytes to vernaux array */
++ Elf64_Word vn_next; /* Offset in bytes to next verneed
++ entry */
++} Elf64_Verneed;
++
++
++/* Legal values for vn_version (version revision). */
++#define VER_NEED_NONE 0 /* No version */
++#define VER_NEED_CURRENT 1 /* Current version */
++#define VER_NEED_NUM 2 /* Given version number */
++
++/* Auxiliary needed version information. */
++
++typedef struct
++{
++ Elf32_Word vna_hash; /* Hash value of dependency name */
++ Elf32_Half vna_flags; /* Dependency specific information */
++ Elf32_Half vna_other; /* Unused */
++ Elf32_Word vna_name; /* Dependency name string offset */
++ Elf32_Word vna_next; /* Offset in bytes to next vernaux
++ entry */
++} Elf32_Vernaux;
++
++typedef struct
++{
++ Elf64_Word vna_hash; /* Hash value of dependency name */
++ Elf64_Half vna_flags; /* Dependency specific information */
++ Elf64_Half vna_other; /* Unused */
++ Elf64_Word vna_name; /* Dependency name string offset */
++ Elf64_Word vna_next; /* Offset in bytes to next vernaux
++ entry */
++} Elf64_Vernaux;
++
++
++/* Legal values for vna_flags. */
++#define VER_FLG_WEAK 0x2 /* Weak version identifier */
++
++
++/* Auxiliary vector. */
++
++/* This vector is normally only used by the program interpreter. The
++ usual definition in an ABI supplement uses the name auxv_t. The
++ vector is not usually defined in a standard <elf.h> file, but it
++ can't hurt. We rename it to avoid conflicts. The sizes of these
++ types are an arrangement between the exec server and the program
++ interpreter, so we don't fully specify them here. */
++
++typedef struct
++{
++ int a_type; /* Entry type */
++ union
++ {
++ long int a_val; /* Integer value */
++ void *a_ptr; /* Pointer value */
++ void (*a_fcn) (void); /* Function pointer value */
++ } a_un;
++} Elf32_auxv_t;
++
++typedef struct
++{
++ long int a_type; /* Entry type */
++ union
++ {
++ long int a_val; /* Integer value */
++ void *a_ptr; /* Pointer value */
++ void (*a_fcn) (void); /* Function pointer value */
++ } a_un;
++} Elf64_auxv_t;
++
++/* Legal values for a_type (entry type). */
++
++#define AT_NULL 0 /* End of vector */
++#define AT_IGNORE 1 /* Entry should be ignored */
++#define AT_EXECFD 2 /* File descriptor of program */
++#define AT_PHDR 3 /* Program headers for program */
++#define AT_PHENT 4 /* Size of program header entry */
++#define AT_PHNUM 5 /* Number of program headers */
++#define AT_PAGESZ 6 /* System page size */
++#define AT_BASE 7 /* Base address of interpreter */
++#define AT_FLAGS 8 /* Flags */
++#define AT_ENTRY 9 /* Entry point of program */
++#define AT_NOTELF 10 /* Program is not ELF */
++#define AT_UID 11 /* Real uid */
++#define AT_EUID 12 /* Effective uid */
++#define AT_GID 13 /* Real gid */
++#define AT_EGID 14 /* Effective gid */
++#define AT_CLKTCK 17 /* Frequency of times() */
++
++/* Some more special a_type values describing the hardware. */
++#define AT_PLATFORM 15 /* String identifying platform. */
++#define AT_HWCAP 16 /* Machine dependent hints about
++ processor capabilities. */
++
++/* This entry gives some information about the FPU initialization
++ performed by the kernel. */
++#define AT_FPUCW 18 /* Used FPU control word. */
++
++/* Cache block sizes. */
++#define AT_DCACHEBSIZE 19 /* Data cache block size. */
++#define AT_ICACHEBSIZE 20 /* Instruction cache block size. */
++#define AT_UCACHEBSIZE 21 /* Unified cache block size. */
++
++/* A special ignored value for PPC, used by the kernel to control the
++ interpretation of the AUXV. Must be > 16. */
++#define AT_IGNOREPPC 22 /* Entry should be ignored. */
++
++#define AT_SECURE 23 /* Boolean, was exec setuid-like? */
++
++/* Pointer to the global system page used for system calls and other
++ nice things. */
++#define AT_SYSINFO 32
++#define AT_SYSINFO_EHDR 33
++
++
++/* Note section contents. Each entry in the note section begins with
++ a header of a fixed form. */
++
++typedef struct
++{
++ Elf32_Word n_namesz; /* Length of the note's name. */
++ Elf32_Word n_descsz; /* Length of the note's descriptor. */
++ Elf32_Word n_type; /* Type of the note. */
++} Elf32_Nhdr;
++
++typedef struct
++{
++ Elf64_Word n_namesz; /* Length of the note's name. */
++ Elf64_Word n_descsz; /* Length of the note's descriptor. */
++ Elf64_Word n_type; /* Type of the note. */
++} Elf64_Nhdr;
++
++/* Known names of notes. */
++
++/* Solaris entries in the note section have this name. */
++#define ELF_NOTE_SOLARIS "SUNW Solaris"
++
++/* Note entries for GNU systems have this name. */
++#define ELF_NOTE_GNU "GNU"
++
++
++/* Defined types of notes for Solaris. */
++
++/* Value of descriptor (one word) is desired pagesize for the binary. */
++#define ELF_NOTE_PAGESIZE_HINT 1
++
++
++/* Defined note types for GNU systems. */
++
++/* ABI information. The descriptor consists of words:
++ word 0: OS descriptor
++ word 1: major version of the ABI
++ word 2: minor version of the ABI
++ word 3: subminor version of the ABI
++*/
++#define ELF_NOTE_ABI 1
++
++/* Known OSes. These values can appear in word 0 of an ELF_NOTE_ABI
++ note section entry. */
++#define ELF_NOTE_OS_LINUX 0
++#define ELF_NOTE_OS_GNU 1
++#define ELF_NOTE_OS_SOLARIS2 2
++#define ELF_NOTE_OS_FREEBSD 3
++
++
++/* Move records. */
++typedef struct
++{
++ Elf32_Xword m_value; /* Symbol value. */
++ Elf32_Word m_info; /* Size and index. */
++ Elf32_Word m_poffset; /* Symbol offset. */
++ Elf32_Half m_repeat; /* Repeat count. */
++ Elf32_Half m_stride; /* Stride info. */
++} Elf32_Move;
++
++typedef struct
++{
++ Elf64_Xword m_value; /* Symbol value. */
++ Elf64_Xword m_info; /* Size and index. */
++ Elf64_Xword m_poffset; /* Symbol offset. */
++ Elf64_Half m_repeat; /* Repeat count. */
++ Elf64_Half m_stride; /* Stride info. */
++} Elf64_Move;
++
++/* Macro to construct move records. */
++#define ELF32_M_SYM(info) ((info) >> 8)
++#define ELF32_M_SIZE(info) ((unsigned char) (info))
++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size))
++
++#define ELF64_M_SYM(info) ELF32_M_SYM (info)
++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info)
++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size)
++
++
++/* Motorola 68k specific definitions. */
++
++/* Values for Elf32_Ehdr.e_flags. */
++#define EF_CPU32 0x00810000
++
++/* m68k relocs. */
++
++#define R_68K_NONE 0 /* No reloc */
++#define R_68K_32 1 /* Direct 32 bit */
++#define R_68K_16 2 /* Direct 16 bit */
++#define R_68K_8 3 /* Direct 8 bit */
++#define R_68K_PC32 4 /* PC relative 32 bit */
++#define R_68K_PC16 5 /* PC relative 16 bit */
++#define R_68K_PC8 6 /* PC relative 8 bit */
++#define R_68K_GOT32 7 /* 32 bit PC relative GOT entry */
++#define R_68K_GOT16 8 /* 16 bit PC relative GOT entry */
++#define R_68K_GOT8 9 /* 8 bit PC relative GOT entry */
++#define R_68K_GOT32O 10 /* 32 bit GOT offset */
++#define R_68K_GOT16O 11 /* 16 bit GOT offset */
++#define R_68K_GOT8O 12 /* 8 bit GOT offset */
++#define R_68K_PLT32 13 /* 32 bit PC relative PLT address */
++#define R_68K_PLT16 14 /* 16 bit PC relative PLT address */
++#define R_68K_PLT8 15 /* 8 bit PC relative PLT address */
++#define R_68K_PLT32O 16 /* 32 bit PLT offset */
++#define R_68K_PLT16O 17 /* 16 bit PLT offset */
++#define R_68K_PLT8O 18 /* 8 bit PLT offset */
++#define R_68K_COPY 19 /* Copy symbol at runtime */
++#define R_68K_GLOB_DAT 20 /* Create GOT entry */
++#define R_68K_JMP_SLOT 21 /* Create PLT entry */
++#define R_68K_RELATIVE 22 /* Adjust by program base */
++/* Keep this the last entry. */
++#define R_68K_NUM 23
++
++/* Intel 80386 specific definitions. */
++
++/* i386 relocs. */
++
++#define R_386_NONE 0 /* No reloc */
++#define R_386_32 1 /* Direct 32 bit */
++#define R_386_PC32 2 /* PC relative 32 bit */
++#define R_386_GOT32 3 /* 32 bit GOT entry */
++#define R_386_PLT32 4 /* 32 bit PLT address */
++#define R_386_COPY 5 /* Copy symbol at runtime */
++#define R_386_GLOB_DAT 6 /* Create GOT entry */
++#define R_386_JMP_SLOT 7 /* Create PLT entry */
++#define R_386_RELATIVE 8 /* Adjust by program base */
++#define R_386_GOTOFF 9 /* 32 bit offset to GOT */
++#define R_386_GOTPC 10 /* 32 bit PC relative offset to GOT */
++#define R_386_32PLT 11
++#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */
++#define R_386_TLS_IE 15 /* Address of GOT entry for static TLS
++ block offset */
++#define R_386_TLS_GOTIE 16 /* GOT entry for static TLS block
++ offset */
++#define R_386_TLS_LE 17 /* Offset relative to static TLS
++ block */
++#define R_386_TLS_GD 18 /* Direct 32 bit for GNU version of
++ general dynamic thread local data */
++#define R_386_TLS_LDM 19 /* Direct 32 bit for GNU version of
++ local dynamic thread local data
++ in LE code */
++#define R_386_16 20
++#define R_386_PC16 21
++#define R_386_8 22
++#define R_386_PC8 23
++#define R_386_TLS_GD_32 24 /* Direct 32 bit for general dynamic
++ thread local data */
++#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */
++#define R_386_TLS_GD_CALL 26 /* Relocation for call to
++ __tls_get_addr() */
++#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */
++#define R_386_TLS_LDM_32 28 /* Direct 32 bit for local dynamic
++ thread local data in LE code */
++#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */
++#define R_386_TLS_LDM_CALL 30 /* Relocation for call to
++ __tls_get_addr() in LDM code */
++#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */
++#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */
++#define R_386_TLS_IE_32 33 /* GOT entry for negated static TLS
++ block offset */
++#define R_386_TLS_LE_32 34 /* Negated offset relative to static
++ TLS block */
++#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */
++#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */
++#define R_386_TLS_TPOFF32 37 /* Negated offset in static TLS block */
++/* Keep this the last entry. */
++#define R_386_NUM 38
++
++/* SUN SPARC specific definitions. */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type). */
++
++#define STT_REGISTER 13 /* Global register reserved to app. */
++
++/* Values for Elf64_Ehdr.e_flags. */
++
++#define EF_SPARCV9_MM 3
++#define EF_SPARCV9_TSO 0
++#define EF_SPARCV9_PSO 1
++#define EF_SPARCV9_RMO 2
++#define EF_SPARC_LEDATA 0x800000 /* little endian data */
++#define EF_SPARC_EXT_MASK 0xFFFF00
++#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */
++#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */
++#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */
++#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */
++
++/* SPARC relocs. */
++
++#define R_SPARC_NONE 0 /* No reloc */
++#define R_SPARC_8 1 /* Direct 8 bit */
++#define R_SPARC_16 2 /* Direct 16 bit */
++#define R_SPARC_32 3 /* Direct 32 bit */
++#define R_SPARC_DISP8 4 /* PC relative 8 bit */
++#define R_SPARC_DISP16 5 /* PC relative 16 bit */
++#define R_SPARC_DISP32 6 /* PC relative 32 bit */
++#define R_SPARC_WDISP30 7 /* PC relative 30 bit shifted */
++#define R_SPARC_WDISP22 8 /* PC relative 22 bit shifted */
++#define R_SPARC_HI22 9 /* High 22 bit */
++#define R_SPARC_22 10 /* Direct 22 bit */
++#define R_SPARC_13 11 /* Direct 13 bit */
++#define R_SPARC_LO10 12 /* Truncated 10 bit */
++#define R_SPARC_GOT10 13 /* Truncated 10 bit GOT entry */
++#define R_SPARC_GOT13 14 /* 13 bit GOT entry */
++#define R_SPARC_GOT22 15 /* 22 bit GOT entry shifted */
++#define R_SPARC_PC10 16 /* PC relative 10 bit truncated */
++#define R_SPARC_PC22 17 /* PC relative 22 bit shifted */
++#define R_SPARC_WPLT30 18 /* 30 bit PC relative PLT address */
++#define R_SPARC_COPY 19 /* Copy symbol at runtime */
++#define R_SPARC_GLOB_DAT 20 /* Create GOT entry */
++#define R_SPARC_JMP_SLOT 21 /* Create PLT entry */
++#define R_SPARC_RELATIVE 22 /* Adjust by program base */
++#define R_SPARC_UA32 23 /* Direct 32 bit unaligned */
++
++/* Additional Sparc64 relocs. */
++
++#define R_SPARC_PLT32 24 /* Direct 32 bit ref to PLT entry */
++#define R_SPARC_HIPLT22 25 /* High 22 bit PLT entry */
++#define R_SPARC_LOPLT10 26 /* Truncated 10 bit PLT entry */
++#define R_SPARC_PCPLT32 27 /* PC rel 32 bit ref to PLT entry */
++#define R_SPARC_PCPLT22 28 /* PC rel high 22 bit PLT entry */
++#define R_SPARC_PCPLT10 29 /* PC rel trunc 10 bit PLT entry */
++#define R_SPARC_10 30 /* Direct 10 bit */
++#define R_SPARC_11 31 /* Direct 11 bit */
++#define R_SPARC_64 32 /* Direct 64 bit */
++#define R_SPARC_OLO10 33 /* 10bit with secondary 13bit addend */
++#define R_SPARC_HH22 34 /* Top 22 bits of direct 64 bit */
++#define R_SPARC_HM10 35 /* High middle 10 bits of ... */
++#define R_SPARC_LM22 36 /* Low middle 22 bits of ... */
++#define R_SPARC_PC_HH22 37 /* Top 22 bits of pc rel 64 bit */
++#define R_SPARC_PC_HM10 38 /* High middle 10 bit of ... */
++#define R_SPARC_PC_LM22 39 /* Low middle 22 bits of ... */
++#define R_SPARC_WDISP16 40 /* PC relative 16 bit shifted */
++#define R_SPARC_WDISP19 41 /* PC relative 19 bit shifted */
++#define R_SPARC_7 43 /* Direct 7 bit */
++#define R_SPARC_5 44 /* Direct 5 bit */
++#define R_SPARC_6 45 /* Direct 6 bit */
++#define R_SPARC_DISP64 46 /* PC relative 64 bit */
++#define R_SPARC_PLT64 47 /* Direct 64 bit ref to PLT entry */
++#define R_SPARC_HIX22 48 /* High 22 bit complemented */
++#define R_SPARC_LOX10 49 /* Truncated 11 bit complemented */
++#define R_SPARC_H44 50 /* Direct high 12 of 44 bit */
++#define R_SPARC_M44 51 /* Direct mid 22 of 44 bit */
++#define R_SPARC_L44 52 /* Direct low 10 of 44 bit */
++#define R_SPARC_REGISTER 53 /* Global register usage */
++#define R_SPARC_UA64 54 /* Direct 64 bit unaligned */
++#define R_SPARC_UA16 55 /* Direct 16 bit unaligned */
++#define R_SPARC_TLS_GD_HI22 56
++#define R_SPARC_TLS_GD_LO10 57
++#define R_SPARC_TLS_GD_ADD 58
++#define R_SPARC_TLS_GD_CALL 59
++#define R_SPARC_TLS_LDM_HI22 60
++#define R_SPARC_TLS_LDM_LO10 61
++#define R_SPARC_TLS_LDM_ADD 62
++#define R_SPARC_TLS_LDM_CALL 63
++#define R_SPARC_TLS_LDO_HIX22 64
++#define R_SPARC_TLS_LDO_LOX10 65
++#define R_SPARC_TLS_LDO_ADD 66
++#define R_SPARC_TLS_IE_HI22 67
++#define R_SPARC_TLS_IE_LO10 68
++#define R_SPARC_TLS_IE_LD 69
++#define R_SPARC_TLS_IE_LDX 70
++#define R_SPARC_TLS_IE_ADD 71
++#define R_SPARC_TLS_LE_HIX22 72
++#define R_SPARC_TLS_LE_LOX10 73
++#define R_SPARC_TLS_DTPMOD32 74
++#define R_SPARC_TLS_DTPMOD64 75
++#define R_SPARC_TLS_DTPOFF32 76
++#define R_SPARC_TLS_DTPOFF64 77
++#define R_SPARC_TLS_TPOFF32 78
++#define R_SPARC_TLS_TPOFF64 79
++/* Keep this the last entry. */
++#define R_SPARC_NUM 80
++
++/* For Sparc64, legal values for d_tag of Elf64_Dyn. */
++
++#define DT_SPARC_REGISTER 0x70000001
++#define DT_SPARC_NUM 2
++
++/* Bits present in AT_HWCAP, primarily for Sparc32. */
++
++#define HWCAP_SPARC_FLUSH 1 /* The cpu supports flush insn. */
++#define HWCAP_SPARC_STBAR 2
++#define HWCAP_SPARC_SWAP 4
++#define HWCAP_SPARC_MULDIV 8
++#define HWCAP_SPARC_V9 16 /* The cpu is v9, so v8plus is ok. */
++#define HWCAP_SPARC_ULTRA3 32
++
++/* MIPS R3000 specific definitions. */
++
++/* Legal values for e_flags field of Elf32_Ehdr. */
++
++#define EF_MIPS_NOREORDER 1 /* A .noreorder directive was used */
++#define EF_MIPS_PIC 2 /* Contains PIC code */
++#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */
++#define EF_MIPS_XGOT 8
++#define EF_MIPS_64BIT_WHIRL 16
++#define EF_MIPS_ABI2 32
++#define EF_MIPS_ABI_ON32 64
++#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */
++
++/* Legal values for MIPS architecture level. */
++
++#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
++#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
++#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
++#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
++#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
++#define EF_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */
++#define EF_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */
++
++/* The following are non-official names and should not be used. */
++
++#define E_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
++#define E_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
++#define E_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
++#define E_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
++#define E_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
++#define E_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */
++#define E_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */
++
++/* Special section indices. */
++
++#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */
++#define SHN_MIPS_TEXT 0xff01 /* Allocated text symbols. */
++#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */
++#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */
++#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */
++
++/* Legal values for sh_type field of Elf32_Shdr. */
++
++#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */
++#define SHT_MIPS_MSYM 0x70000001
++#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */
++#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */
++#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */
++#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information*/
++#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */
++#define SHT_MIPS_PACKAGE 0x70000007
++#define SHT_MIPS_PACKSYM 0x70000008
++#define SHT_MIPS_RELD 0x70000009
++#define SHT_MIPS_IFACE 0x7000000b
++#define SHT_MIPS_CONTENT 0x7000000c
++#define SHT_MIPS_OPTIONS 0x7000000d /* Miscellaneous options. */
++#define SHT_MIPS_SHDR 0x70000010
++#define SHT_MIPS_FDESC 0x70000011
++#define SHT_MIPS_EXTSYM 0x70000012
++#define SHT_MIPS_DENSE 0x70000013
++#define SHT_MIPS_PDESC 0x70000014
++#define SHT_MIPS_LOCSYM 0x70000015
++#define SHT_MIPS_AUXSYM 0x70000016
++#define SHT_MIPS_OPTSYM 0x70000017
++#define SHT_MIPS_LOCSTR 0x70000018
++#define SHT_MIPS_LINE 0x70000019
++#define SHT_MIPS_RFDESC 0x7000001a
++#define SHT_MIPS_DELTASYM 0x7000001b
++#define SHT_MIPS_DELTAINST 0x7000001c
++#define SHT_MIPS_DELTACLASS 0x7000001d
++#define SHT_MIPS_DWARF 0x7000001e /* DWARF debugging information. */
++#define SHT_MIPS_DELTADECL 0x7000001f
++#define SHT_MIPS_SYMBOL_LIB 0x70000020
++#define SHT_MIPS_EVENTS 0x70000021 /* Event section. */
++#define SHT_MIPS_TRANSLATE 0x70000022
++#define SHT_MIPS_PIXIE 0x70000023
++#define SHT_MIPS_XLATE 0x70000024
++#define SHT_MIPS_XLATE_DEBUG 0x70000025
++#define SHT_MIPS_WHIRL 0x70000026
++#define SHT_MIPS_EH_REGION 0x70000027
++#define SHT_MIPS_XLATE_OLD 0x70000028
++#define SHT_MIPS_PDR_EXCEPTION 0x70000029
++
++/* Legal values for sh_flags field of Elf32_Shdr. */
++
++#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */
++#define SHF_MIPS_MERGE 0x20000000
++#define SHF_MIPS_ADDR 0x40000000
++#define SHF_MIPS_STRINGS 0x80000000
++#define SHF_MIPS_NOSTRIP 0x08000000
++#define SHF_MIPS_LOCAL 0x04000000
++#define SHF_MIPS_NAMES 0x02000000
++#define SHF_MIPS_NODUPE 0x01000000
++
++
++/* Symbol tables. */
++
++/* MIPS specific values for `st_other'. */
++#define STO_MIPS_DEFAULT 0x0
++#define STO_MIPS_INTERNAL 0x1
++#define STO_MIPS_HIDDEN 0x2
++#define STO_MIPS_PROTECTED 0x3
++#define STO_MIPS_SC_ALIGN_UNUSED 0xff
++
++/* MIPS specific values for `st_info'. */
++#define STB_MIPS_SPLIT_COMMON 13
++
++/* Entries found in sections of type SHT_MIPS_GPTAB. */
++
++typedef union
++{
++ struct
++ {
++ Elf32_Word gt_current_g_value; /* -G value used for compilation */
++ Elf32_Word gt_unused; /* Not used */
++ } gt_header; /* First entry in section */
++ struct
++ {
++ Elf32_Word gt_g_value; /* If this value were used for -G */
++ Elf32_Word gt_bytes; /* This many bytes would be used */
++ } gt_entry; /* Subsequent entries in section */
++} Elf32_gptab;
++
++/* Entry found in sections of type SHT_MIPS_REGINFO. */
++
++typedef struct
++{
++ Elf32_Word ri_gprmask; /* General registers used */
++ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */
++ Elf32_Sword ri_gp_value; /* $gp register value */
++} Elf32_RegInfo;
++
++/* Entries found in sections of type SHT_MIPS_OPTIONS. */
++
++typedef struct
++{
++ unsigned char kind; /* Determines interpretation of the
++ variable part of descriptor. */
++ unsigned char size; /* Size of descriptor, including header. */
++ Elf32_Section section; /* Section header index of section affected,
++ 0 for global options. */
++ Elf32_Word info; /* Kind-specific information. */
++} Elf_Options;
++
++/* Values for `kind' field in Elf_Options. */
++
++#define ODK_NULL 0 /* Undefined. */
++#define ODK_REGINFO 1 /* Register usage information. */
++#define ODK_EXCEPTIONS 2 /* Exception processing options. */
++#define ODK_PAD 3 /* Section padding options. */
++#define ODK_HWPATCH 4 /* Hardware workarounds performed. */
++#define ODK_FILL 5 /* Record the fill value used by the linker. */
++#define ODK_TAGS 6 /* Reserve space for desktop tools to write. */
++#define ODK_HWAND 7 /* HW workarounds. 'AND' bits when merging. */
++#define ODK_HWOR 8 /* HW workarounds. 'OR' bits when merging. */
++
++/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries. */
++
++#define OEX_FPU_MIN 0x1f /* FPEs which MUST be enabled. */
++#define OEX_FPU_MAX 0x1f00 /* FPEs which MAY be enabled. */
++#define OEX_PAGE0 0x10000 /* Page zero must be mapped. */
++#define OEX_SMM 0x20000 /* Force sequential memory mode? */
++#define OEX_FPDBUG 0x40000 /* Force floating point debug mode? */
++#define OEX_PRECISEFP OEX_FPDBUG
++#define OEX_DISMISS 0x80000 /* Dismiss invalid address faults? */
++
++#define OEX_FPU_INVAL 0x10
++#define OEX_FPU_DIV0 0x08
++#define OEX_FPU_OFLO 0x04
++#define OEX_FPU_UFLO 0x02
++#define OEX_FPU_INEX 0x01
++
++/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry. */
++
++#define OHW_R4KEOP 0x1 /* R4000 end-of-page patch. */
++#define OHW_R8KPFETCH 0x2 /* may need R8000 prefetch patch. */
++#define OHW_R5KEOP 0x4 /* R5000 end-of-page patch. */
++#define OHW_R5KCVTL 0x8 /* R5000 cvt.[ds].l bug. clean=1. */
++
++#define OPAD_PREFIX 0x1
++#define OPAD_POSTFIX 0x2
++#define OPAD_SYMBOL 0x4
++
++/* Entry found in `.options' section. */
++
++typedef struct
++{
++ Elf32_Word hwp_flags1; /* Extra flags. */
++ Elf32_Word hwp_flags2; /* Extra flags. */
++} Elf_Options_Hw;
++
++/* Masks for `info' in Elf_Options for ODK_HWAND and ODK_HWOR entries. */
++
++#define OHWA0_R4KEOP_CHECKED 0x00000001
++#define OHWA1_R4KEOP_CLEAN 0x00000002
++
++/* MIPS relocs. */
++
++#define R_MIPS_NONE 0 /* No reloc */
++#define R_MIPS_16 1 /* Direct 16 bit */
++#define R_MIPS_32 2 /* Direct 32 bit */
++#define R_MIPS_REL32 3 /* PC relative 32 bit */
++#define R_MIPS_26 4 /* Direct 26 bit shifted */
++#define R_MIPS_HI16 5 /* High 16 bit */
++#define R_MIPS_LO16 6 /* Low 16 bit */
++#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
++#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
++#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
++#define R_MIPS_PC16 10 /* PC relative 16 bit */
++#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
++#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
++
++#define R_MIPS_SHIFT5 16
++#define R_MIPS_SHIFT6 17
++#define R_MIPS_64 18
++#define R_MIPS_GOT_DISP 19
++#define R_MIPS_GOT_PAGE 20
++#define R_MIPS_GOT_OFST 21
++#define R_MIPS_GOT_HI16 22
++#define R_MIPS_GOT_LO16 23
++#define R_MIPS_SUB 24
++#define R_MIPS_INSERT_A 25
++#define R_MIPS_INSERT_B 26
++#define R_MIPS_DELETE 27
++#define R_MIPS_HIGHER 28
++#define R_MIPS_HIGHEST 29
++#define R_MIPS_CALL_HI16 30
++#define R_MIPS_CALL_LO16 31
++#define R_MIPS_SCN_DISP 32
++#define R_MIPS_REL16 33
++#define R_MIPS_ADD_IMMEDIATE 34
++#define R_MIPS_PJUMP 35
++#define R_MIPS_RELGOT 36
++#define R_MIPS_JALR 37
++/* Keep this the last entry. */
++#define R_MIPS_NUM 38
++
++/* Legal values for p_type field of Elf32_Phdr. */
++
++#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
++#define PT_MIPS_RTPROC 0x70000001 /* Runtime procedure table. */
++#define PT_MIPS_OPTIONS 0x70000002
++
++/* Legal values for p_flags field of Elf32_Phdr. */
++
++#define PF_MIPS_LOCAL 0x10000000
++
++/* Legal values for d_tag field of Elf32_Dyn. */
++
++#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime linker interface version */
++#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */
++#define DT_MIPS_ICHECKSUM 0x70000003 /* Checksum */
++#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */
++#define DT_MIPS_FLAGS 0x70000005 /* Flags */
++#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Base address */
++#define DT_MIPS_MSYM 0x70000007
++#define DT_MIPS_CONFLICT 0x70000008 /* Address of CONFLICT section */
++#define DT_MIPS_LIBLIST 0x70000009 /* Address of LIBLIST section */
++#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local GOT entries */
++#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of CONFLICT entries */
++#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of LIBLIST entries */
++#define DT_MIPS_SYMTABNO 0x70000011 /* Number of DYNSYM entries */
++#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */
++#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in DYNSYM */
++#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */
++#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */
++#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */
++#define DT_MIPS_DELTA_CLASS_NO 0x70000018 /* Number of entries in
++ DT_MIPS_DELTA_CLASS. */
++#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */
++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in
++ DT_MIPS_DELTA_INSTANCE. */
++#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */
++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in
++ DT_MIPS_DELTA_RELOC. */
++#define DT_MIPS_DELTA_SYM 0x7000001d /* Delta symbols that Delta
++ relocations refer to. */
++#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in
++ DT_MIPS_DELTA_SYM. */
++#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the
++ class declaration. */
++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in
++ DT_MIPS_DELTA_CLASSSYM. */
++#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */
++#define DT_MIPS_PIXIE_INIT 0x70000023
++#define DT_MIPS_SYMBOL_LIB 0x70000024
++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025
++#define DT_MIPS_LOCAL_GOTIDX 0x70000026
++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027
++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028
++#define DT_MIPS_OPTIONS 0x70000029 /* Address of .options. */
++#define DT_MIPS_INTERFACE 0x7000002a /* Address of .interface. */
++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b
++#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */
++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_resolve
++ function stored in GOT. */
++#define DT_MIPS_PERF_SUFFIX 0x7000002e /* Default suffix of dso to be added
++ by rld on dlopen() calls. */
++#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32) Size of compact rel section. */
++#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */
++#define DT_MIPS_AUX_DYNAMIC 0x70000031 /* Address of aux .dynamic. */
++#define DT_MIPS_NUM 0x32
++
++/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry. */
++
++#define RHF_NONE 0 /* No flags */
++#define RHF_QUICKSTART (1 << 0) /* Use quickstart */
++#define RHF_NOTPOT (1 << 1) /* Hash size not power of 2 */
++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2) /* Ignore LD_LIBRARY_PATH */
++#define RHF_NO_MOVE (1 << 3)
++#define RHF_SGI_ONLY (1 << 4)
++#define RHF_GUARANTEE_INIT (1 << 5)
++#define RHF_DELTA_C_PLUS_PLUS (1 << 6)
++#define RHF_GUARANTEE_START_INIT (1 << 7)
++#define RHF_PIXIE (1 << 8)
++#define RHF_DEFAULT_DELAY_LOAD (1 << 9)
++#define RHF_REQUICKSTART (1 << 10)
++#define RHF_REQUICKSTARTED (1 << 11)
++#define RHF_CORD (1 << 12)
++#define RHF_NO_UNRES_UNDEF (1 << 13)
++#define RHF_RLD_ORDER_SAFE (1 << 14)
++
++/* Entries found in sections of type SHT_MIPS_LIBLIST. */
++
++typedef struct
++{
++ Elf32_Word l_name; /* Name (string table index) */
++ Elf32_Word l_time_stamp; /* Timestamp */
++ Elf32_Word l_checksum; /* Checksum */
++ Elf32_Word l_version; /* Interface version */
++ Elf32_Word l_flags; /* Flags */
++} Elf32_Lib;
++
++typedef struct
++{
++ Elf64_Word l_name; /* Name (string table index) */
++ Elf64_Word l_time_stamp; /* Timestamp */
++ Elf64_Word l_checksum; /* Checksum */
++ Elf64_Word l_version; /* Interface version */
++ Elf64_Word l_flags; /* Flags */
++} Elf64_Lib;
++
++
++/* Legal values for l_flags. */
++
++#define LL_NONE 0
++#define LL_EXACT_MATCH (1 << 0) /* Require exact match */
++#define LL_IGNORE_INT_VER (1 << 1) /* Ignore interface version */
++#define LL_REQUIRE_MINOR (1 << 2)
++#define LL_EXPORTS (1 << 3)
++#define LL_DELAY_LOAD (1 << 4)
++#define LL_DELTA (1 << 5)
++
++/* Entries found in sections of type SHT_MIPS_CONFLICT. */
++
++typedef Elf32_Addr Elf32_Conflict;
++
++
++/* HPPA specific definitions. */
++
++/* Legal values for e_flags field of Elf32_Ehdr. */
++
++#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
++#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
++#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
++#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
++#define EF_PARISC_NO_KABP 0x00100000 /* No kernel-assisted branch
++ prediction. */
++#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
++#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
++
++/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
++
++#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
++#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
++#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
++
++/* Additional section indices. */
++
++#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
++ symbols in ANSI C. */
++#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
++
++/* Legal values for sh_type field of Elf32_Shdr. */
++
++#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
++#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
++#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
++
++/* Legal values for sh_flags field of Elf32_Shdr. */
++
++#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
++#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
++#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type). */
++
++#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
++
++#define STT_HP_OPAQUE (STT_LOOS + 0x1)
++#define STT_HP_STUB (STT_LOOS + 0x2)
++
++/* HPPA relocs. */
++
++#define R_PARISC_NONE 0 /* No reloc. */
++#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
++#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
++#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
++#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
++#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
++#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
++#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
++#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
++#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
++#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
++#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
++#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
++#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
++#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
++#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
++#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
++#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
++#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
++#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
++#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
++#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
++#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
++#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
++#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
++#define R_PARISC_FPTR64 64 /* 64 bits function address. */
++#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
++#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
++#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
++#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
++#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
++#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
++#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
++#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
++#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
++#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
++#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
++#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
++#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
++#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
++#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
++#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
++#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
++#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
++#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
++#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
++#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
++#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
++#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
++#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
++#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
++#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
++#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
++#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
++#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
++#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
++#define R_PARISC_PLTOFF16F 117 /* 16 bits LT-rel. address. */
++#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
++#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
++#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
++#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
++#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
++#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
++#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
++#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
++#define R_PARISC_LORESERVE 128
++#define R_PARISC_COPY 128 /* Copy relocation. */
++#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
++#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
++#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
++#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
++#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
++#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
++#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
++#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
++#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
++#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
++#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
++#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
++#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
++#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
++#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
++#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
++#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
++#define R_PARISC_HIRESERVE 255
++
++/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
++
++#define PT_HP_TLS (PT_LOOS + 0x0)
++#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
++#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
++#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
++#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
++#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
++#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
++#define PT_HP_PARALLEL (PT_LOOS + 0x10)
++#define PT_HP_FASTBIND (PT_LOOS + 0x11)
++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
++#define PT_HP_STACK (PT_LOOS + 0x14)
++
++#define PT_PARISC_ARCHEXT 0x70000000
++#define PT_PARISC_UNWIND 0x70000001
++
++/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
++
++#define PF_PARISC_SBP 0x08000000
++
++#define PF_HP_PAGE_SIZE 0x00100000
++#define PF_HP_FAR_SHARED 0x00200000
++#define PF_HP_NEAR_SHARED 0x00400000
++#define PF_HP_CODE 0x01000000
++#define PF_HP_MODIFY 0x02000000
++#define PF_HP_LAZYSWAP 0x04000000
++#define PF_HP_SBP 0x08000000
++
++
++/* Alpha specific definitions. */
++
++/* Legal values for e_flags field of Elf64_Ehdr. */
++
++#define EF_ALPHA_32BIT 1 /* All addresses must be < 2GB. */
++#define EF_ALPHA_CANRELAX 2 /* Relocations for relaxing exist. */
++
++/* Legal values for sh_type field of Elf64_Shdr. */
++
++/* These two are primarily concerned with ECOFF debugging info. */
++#define SHT_ALPHA_DEBUG 0x70000001
++#define SHT_ALPHA_REGINFO 0x70000002
++
++/* Legal values for sh_flags field of Elf64_Shdr. */
++
++#define SHF_ALPHA_GPREL 0x10000000
++
++/* Legal values for st_other field of Elf64_Sym. */
++#define STO_ALPHA_NOPV 0x80 /* No PV required. */
++#define STO_ALPHA_STD_GPLOAD 0x88 /* PV only used for initial ldgp. */
++
++/* Alpha relocs. */
++
++#define R_ALPHA_NONE 0 /* No reloc */
++#define R_ALPHA_REFLONG 1 /* Direct 32 bit */
++#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */
++#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */
++#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */
++#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */
++#define R_ALPHA_GPDISP 6 /* Add displacement to GP */
++#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */
++#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */
++#define R_ALPHA_SREL16 9 /* PC relative 16 bit */
++#define R_ALPHA_SREL32 10 /* PC relative 32 bit */
++#define R_ALPHA_SREL64 11 /* PC relative 64 bit */
++#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */
++#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */
++#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */
++#define R_ALPHA_COPY 24 /* Copy symbol at runtime */
++#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
++#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
++#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
++#define R_ALPHA_TLS_GD_HI 28
++#define R_ALPHA_TLSGD 29
++#define R_ALPHA_TLS_LDM 30
++#define R_ALPHA_DTPMOD64 31
++#define R_ALPHA_GOTDTPREL 32
++#define R_ALPHA_DTPREL64 33
++#define R_ALPHA_DTPRELHI 34
++#define R_ALPHA_DTPRELLO 35
++#define R_ALPHA_DTPREL16 36
++#define R_ALPHA_GOTTPREL 37
++#define R_ALPHA_TPREL64 38
++#define R_ALPHA_TPRELHI 39
++#define R_ALPHA_TPRELLO 40
++#define R_ALPHA_TPREL16 41
++/* Keep this the last entry. */
++#define R_ALPHA_NUM 46
++
++/* Magic values of the LITUSE relocation addend. */
++#define LITUSE_ALPHA_ADDR 0
++#define LITUSE_ALPHA_BASE 1
++#define LITUSE_ALPHA_BYTOFF 2
++#define LITUSE_ALPHA_JSR 3
++#define LITUSE_ALPHA_TLS_GD 4
++#define LITUSE_ALPHA_TLS_LDM 5
++
++
++/* PowerPC specific declarations */
++
++/* Values for Elf32/64_Ehdr.e_flags. */
++#define EF_PPC_EMB 0x80000000 /* PowerPC embedded flag */
++
++/* Cygnus local bits below */
++#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/
++#define EF_PPC_RELOCATABLE_LIB 0x00008000 /* PowerPC -mrelocatable-lib
++ flag */
++
++/* PowerPC relocations defined by the ABIs */
++#define R_PPC_NONE 0
++#define R_PPC_ADDR32 1 /* 32bit absolute address */
++#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
++#define R_PPC_ADDR16 3 /* 16bit absolute address */
++#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
++#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
++#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
++#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
++#define R_PPC_ADDR14_BRTAKEN 8
++#define R_PPC_ADDR14_BRNTAKEN 9
++#define R_PPC_REL24 10 /* PC relative 26 bit */
++#define R_PPC_REL14 11 /* PC relative 16 bit */
++#define R_PPC_REL14_BRTAKEN 12
++#define R_PPC_REL14_BRNTAKEN 13
++#define R_PPC_GOT16 14
++#define R_PPC_GOT16_LO 15
++#define R_PPC_GOT16_HI 16
++#define R_PPC_GOT16_HA 17
++#define R_PPC_PLTREL24 18
++#define R_PPC_COPY 19
++#define R_PPC_GLOB_DAT 20
++#define R_PPC_JMP_SLOT 21
++#define R_PPC_RELATIVE 22
++#define R_PPC_LOCAL24PC 23
++#define R_PPC_UADDR32 24
++#define R_PPC_UADDR16 25
++#define R_PPC_REL32 26
++#define R_PPC_PLT32 27
++#define R_PPC_PLTREL32 28
++#define R_PPC_PLT16_LO 29
++#define R_PPC_PLT16_HI 30
++#define R_PPC_PLT16_HA 31
++#define R_PPC_SDAREL16 32
++#define R_PPC_SECTOFF 33
++#define R_PPC_SECTOFF_LO 34
++#define R_PPC_SECTOFF_HI 35
++#define R_PPC_SECTOFF_HA 36
++
++/* PowerPC relocations defined for the TLS access ABI. */
++#define R_PPC_TLS 67 /* none (sym+add)@tls */
++#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
++#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
++#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
++#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
++#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
++#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
++#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
++#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
++#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
++#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
++#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
++#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
++#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
++#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
++#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
++#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
++#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
++#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
++#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
++#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
++#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
++#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
++#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
++#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
++#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
++#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
++#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
++
++/* Keep this the last entry. */
++#define R_PPC_NUM 95
++
++/* The remaining relocs are from the Embedded ELF ABI, and are not
++ in the SVR4 ELF ABI. */
++#define R_PPC_EMB_NADDR32 101
++#define R_PPC_EMB_NADDR16 102
++#define R_PPC_EMB_NADDR16_LO 103
++#define R_PPC_EMB_NADDR16_HI 104
++#define R_PPC_EMB_NADDR16_HA 105
++#define R_PPC_EMB_SDAI16 106
++#define R_PPC_EMB_SDA2I16 107
++#define R_PPC_EMB_SDA2REL 108
++#define R_PPC_EMB_SDA21 109 /* 16 bit offset in SDA */
++#define R_PPC_EMB_MRKREF 110
++#define R_PPC_EMB_RELSEC16 111
++#define R_PPC_EMB_RELST_LO 112
++#define R_PPC_EMB_RELST_HI 113
++#define R_PPC_EMB_RELST_HA 114
++#define R_PPC_EMB_BIT_FLD 115
++#define R_PPC_EMB_RELSDA 116 /* 16 bit relative offset in SDA */
++
++/* Diab tool relocations. */
++#define R_PPC_DIAB_SDA21_LO 180 /* like EMB_SDA21, but lower 16 bit */
++#define R_PPC_DIAB_SDA21_HI 181 /* like EMB_SDA21, but high 16 bit */
++#define R_PPC_DIAB_SDA21_HA 182 /* like EMB_SDA21, adjusted high 16 */
++#define R_PPC_DIAB_RELSDA_LO 183 /* like EMB_RELSDA, but lower 16 bit */
++#define R_PPC_DIAB_RELSDA_HI 184 /* like EMB_RELSDA, but high 16 bit */
++#define R_PPC_DIAB_RELSDA_HA 185 /* like EMB_RELSDA, adjusted high 16 */
++
++/* This is a phony reloc to handle any old-fashioned TOC16 references
++ that may still be in object files. */
++#define R_PPC_TOC16 255
++
++
++/* PowerPC64 relocations defined by the ABIs */
++#define R_PPC64_NONE R_PPC_NONE
++#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address */
++#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned */
++#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address */
++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of address */
++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of address. */
++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
++#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned */
++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
++#define R_PPC64_REL24 R_PPC_REL24 /* PC-rel. 26 bit, word aligned */
++#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit */
++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
++#define R_PPC64_GOT16 R_PPC_GOT16
++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
++
++#define R_PPC64_COPY R_PPC_COPY
++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
++#define R_PPC64_RELATIVE R_PPC_RELATIVE
++
++#define R_PPC64_UADDR32 R_PPC_UADDR32
++#define R_PPC64_UADDR16 R_PPC_UADDR16
++#define R_PPC64_REL32 R_PPC_REL32
++#define R_PPC64_PLT32 R_PPC_PLT32
++#define R_PPC64_PLTREL32 R_PPC_PLTREL32
++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
++
++#define R_PPC64_SECTOFF R_PPC_SECTOFF
++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
++#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2 */
++#define R_PPC64_ADDR64 38 /* doubleword64 S + A */
++#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A) */
++#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A) */
++#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A) */
++#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A) */
++#define R_PPC64_UADDR64 43 /* doubleword64 S + A */
++#define R_PPC64_REL64 44 /* doubleword64 S + A - P */
++#define R_PPC64_PLT64 45 /* doubleword64 L + A */
++#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P */
++#define R_PPC64_TOC16 47 /* half16* S + A - .TOC. */
++#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.) */
++#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.) */
++#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.) */
++#define R_PPC64_TOC 51 /* doubleword64 .TOC. */
++#define R_PPC64_PLTGOT16 52 /* half16* M + A */
++#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A) */
++#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A) */
++#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A) */
++
++#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2 */
++#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2 */
++#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2 */
++#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2 */
++#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2 */
++#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2 */
++#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2 */
++#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2 */
++#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2 */
++#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2 */
++#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2 */
++
++/* PowerPC64 relocations defined for the TLS access ABI. */
++#define R_PPC64_TLS 67 /* none (sym+add)@tls */
++#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
++#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
++#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
++#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
++#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
++#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
++#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
++#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
++#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
++#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
++#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
++#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
++#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
++#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
++#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
++#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
++#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
++#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
++#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
++#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
++#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
++#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
++#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
++#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
++#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
++#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
++#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
++#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
++#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
++#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
++#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
++#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
++#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
++#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
++#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
++#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
++#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
++#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
++#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
++
++/* Keep this the last entry. */
++#define R_PPC64_NUM 107
++
++/* PowerPC64 specific values for the Dyn d_tag field. */
++#define DT_PPC64_GLINK (DT_LOPROC + 0)
++#define DT_PPC64_NUM 1
++
++
++/* ARM specific declarations */
++
++/* Processor specific flags for the ELF header e_flags field. */
++#define EF_ARM_RELEXEC 0x01
++#define EF_ARM_HASENTRY 0x02
++#define EF_ARM_INTERWORK 0x04
++#define EF_ARM_APCS_26 0x08
++#define EF_ARM_APCS_FLOAT 0x10
++#define EF_ARM_PIC 0x20
++#define EF_ARM_ALIGN8 0x40 /* 8-bit structure alignment is in use */
++#define EF_ARM_NEW_ABI 0x80
++#define EF_ARM_OLD_ABI 0x100
++
++/* Other constants defined in the ARM ELF spec. version B-01. */
++/* NB. These conflict with values defined above. */
++#define EF_ARM_SYMSARESORTED 0x04
++#define EF_ARM_DYNSYMSUSESEGIDX 0x08
++#define EF_ARM_MAPSYMSFIRST 0x10
++#define EF_ARM_EABIMASK 0xff000000
++
++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
++#define EF_ARM_EABI_UNKNOWN 0x00000000
++#define EF_ARM_EABI_VER1 0x01000000
++#define EF_ARM_EABI_VER2 0x02000000
++
++/* Additional symbol types for Thumb */
++#define STT_ARM_TFUNC 0xd
++
++/* ARM-specific values for sh_flags */
++#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */
++#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined
++ in the input to a link step */
++
++/* ARM-specific program header flags */
++#define PF_ARM_SB 0x10000000 /* Segment contains the location
++ addressed by the static base */
++
++/* ARM relocs. */
++#define R_ARM_NONE 0 /* No reloc */
++#define R_ARM_PC24 1 /* PC relative 26 bit branch */
++#define R_ARM_ABS32 2 /* Direct 32 bit */
++#define R_ARM_REL32 3 /* PC relative 32 bit */
++#define R_ARM_PC13 4
++#define R_ARM_ABS16 5 /* Direct 16 bit */
++#define R_ARM_ABS12 6 /* Direct 12 bit */
++#define R_ARM_THM_ABS5 7
++#define R_ARM_ABS8 8 /* Direct 8 bit */
++#define R_ARM_SBREL32 9
++#define R_ARM_THM_PC22 10
++#define R_ARM_THM_PC8 11
++#define R_ARM_AMP_VCALL9 12
++#define R_ARM_SWI24 13
++#define R_ARM_THM_SWI8 14
++#define R_ARM_XPC25 15
++#define R_ARM_THM_XPC22 16
++#define R_ARM_COPY 20 /* Copy symbol at runtime */
++#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
++#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
++#define R_ARM_RELATIVE 23 /* Adjust by program base */
++#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
++#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
++#define R_ARM_GOT32 26 /* 32 bit GOT entry */
++#define R_ARM_PLT32 27 /* 32 bit PLT address */
++#define R_ARM_ALU_PCREL_7_0 32
++#define R_ARM_ALU_PCREL_15_8 33
++#define R_ARM_ALU_PCREL_23_15 34
++#define R_ARM_LDR_SBREL_11_0 35
++#define R_ARM_ALU_SBREL_19_12 36
++#define R_ARM_ALU_SBREL_27_20 37
++#define R_ARM_GNU_VTENTRY 100
++#define R_ARM_GNU_VTINHERIT 101
++#define R_ARM_THM_PC11 102 /* Thumb unconditional branch */
++#define R_ARM_THM_PC9 103 /* Thumb conditional branch */
++#define R_ARM_RXPC25 249
++#define R_ARM_RSBREL32 250
++#define R_ARM_THM_RPC22 251
++#define R_ARM_RREL32 252
++#define R_ARM_RABS22 253
++#define R_ARM_RPC24 254
++#define R_ARM_RBASE 255
++/* Keep this the last entry. */
++#define R_ARM_NUM 256
++
++/* IA-64 specific declarations. */
++
++/* Processor specific flags for the Ehdr e_flags field. */
++#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
++#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
++#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
++
++/* Processor specific values for the Phdr p_type field. */
++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
++#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
++
++/* Processor specific flags for the Phdr p_flags field. */
++#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
++
++/* Processor specific values for the Shdr sh_type field. */
++#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
++
++/* Processor specific flags for the Shdr sh_flags field. */
++#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
++#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
++
++/* Processor specific values for the Dyn d_tag field. */
++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
++#define DT_IA_64_NUM 1
++
++/* IA-64 relocations. */
++#define R_IA64_NONE 0x00 /* none */
++#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
++#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
++#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
++#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
++#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
++#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
++#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
++#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
++#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
++#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
++#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
++#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
++#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
++#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
++#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
++#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
++#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
++#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
++#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
++#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
++#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
++#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
++#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
++#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
++#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
++#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
++#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
++#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
++#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
++#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
++#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
++#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
++#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
++#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
++#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
++#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
++#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
++#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
++#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
++#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
++#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
++#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
++#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
++#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
++#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
++#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
++#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
++#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
++#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
++#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
++#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
++#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
++#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
++#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
++#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
++#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
++#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
++#define R_IA64_COPY 0x84 /* copy relocation */
++#define R_IA64_SUB 0x85 /* Addend and symbol difference */
++#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
++#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
++#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
++#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
++#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
++#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
++#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */
++#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
++#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
++#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
++#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
++#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
++#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
++#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
++#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
++#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
++#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
++
++/* SH specific declarations */
++
++/* SH relocs. */
++#define R_SH_NONE 0
++#define R_SH_DIR32 1
++#define R_SH_REL32 2
++#define R_SH_DIR8WPN 3
++#define R_SH_IND12W 4
++#define R_SH_DIR8WPL 5
++#define R_SH_DIR8WPZ 6
++#define R_SH_DIR8BP 7
++#define R_SH_DIR8W 8
++#define R_SH_DIR8L 9
++#define R_SH_SWITCH16 25
++#define R_SH_SWITCH32 26
++#define R_SH_USES 27
++#define R_SH_COUNT 28
++#define R_SH_ALIGN 29
++#define R_SH_CODE 30
++#define R_SH_DATA 31
++#define R_SH_LABEL 32
++#define R_SH_SWITCH8 33
++#define R_SH_GNU_VTINHERIT 34
++#define R_SH_GNU_VTENTRY 35
++#define R_SH_TLS_GD_32 144
++#define R_SH_TLS_LD_32 145
++#define R_SH_TLS_LDO_32 146
++#define R_SH_TLS_IE_32 147
++#define R_SH_TLS_LE_32 148
++#define R_SH_TLS_DTPMOD32 149
++#define R_SH_TLS_DTPOFF32 150
++#define R_SH_TLS_TPOFF32 151
++#define R_SH_GOT32 160
++#define R_SH_PLT32 161
++#define R_SH_COPY 162
++#define R_SH_GLOB_DAT 163
++#define R_SH_JMP_SLOT 164
++#define R_SH_RELATIVE 165
++#define R_SH_GOTOFF 166
++#define R_SH_GOTPC 167
++/* Keep this the last entry. */
++#define R_SH_NUM 256
++
++/* Additional s390 relocs */
++
++#define R_390_NONE 0 /* No reloc. */
++#define R_390_8 1 /* Direct 8 bit. */
++#define R_390_12 2 /* Direct 12 bit. */
++#define R_390_16 3 /* Direct 16 bit. */
++#define R_390_32 4 /* Direct 32 bit. */
++#define R_390_PC32 5 /* PC relative 32 bit. */
++#define R_390_GOT12 6 /* 12 bit GOT offset. */
++#define R_390_GOT32 7 /* 32 bit GOT offset. */
++#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
++#define R_390_COPY 9 /* Copy symbol at runtime. */
++#define R_390_GLOB_DAT 10 /* Create GOT entry. */
++#define R_390_JMP_SLOT 11 /* Create PLT entry. */
++#define R_390_RELATIVE 12 /* Adjust by program base. */
++#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
++#define R_390_GOTPC 14 /* 32 bit PC relative offset to GOT. */
++#define R_390_GOT16 15 /* 16 bit GOT offset. */
++#define R_390_PC16 16 /* PC relative 16 bit. */
++#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
++#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
++#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
++#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
++#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
++#define R_390_64 22 /* Direct 64 bit. */
++#define R_390_PC64 23 /* PC relative 64 bit. */
++#define R_390_GOT64 24 /* 64 bit GOT offset. */
++#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
++#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
++#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
++#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
++#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
++#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
++#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
++#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
++#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
++#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
++#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
++#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
++#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
++#define R_390_TLS_GDCALL 38 /* Tag for function call in general
++ dynamic TLS code. */
++#define R_390_TLS_LDCALL 39 /* Tag for function call in local
++ dynamic TLS code. */
++#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
++ thread local data. */
++#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
++ thread local data. */
++#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
++ block offset. */
++#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
++ block offset. */
++#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
++ block offset. */
++#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
++ thread local data in LE code. */
++#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
++ thread local data in LE code. */
++#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
++ negated static TLS block offset. */
++#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
++ negated static TLS block offset. */
++#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
++ negated static TLS block offset. */
++#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
++ static TLS block. */
++#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
++ static TLS block. */
++#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
++ block. */
++#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
++ block. */
++#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
++#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
++#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS
++ block. */
++
++/* Keep this the last entry. */
++#define R_390_NUM 57
++
++/* CRIS relocations. */
++#define R_CRIS_NONE 0
++#define R_CRIS_8 1
++#define R_CRIS_16 2
++#define R_CRIS_32 3
++#define R_CRIS_8_PCREL 4
++#define R_CRIS_16_PCREL 5
++#define R_CRIS_32_PCREL 6
++#define R_CRIS_GNU_VTINHERIT 7
++#define R_CRIS_GNU_VTENTRY 8
++#define R_CRIS_COPY 9
++#define R_CRIS_GLOB_DAT 10
++#define R_CRIS_JUMP_SLOT 11
++#define R_CRIS_RELATIVE 12
++#define R_CRIS_16_GOT 13
++#define R_CRIS_32_GOT 14
++#define R_CRIS_16_GOTPLT 15
++#define R_CRIS_32_GOTPLT 16
++#define R_CRIS_32_GOTREL 17
++#define R_CRIS_32_PLT_GOTREL 18
++#define R_CRIS_32_PLT_PCREL 19
++
++#define R_CRIS_NUM 20
++
++/* AMD x86-64 relocations. */
++#define R_X86_64_NONE 0 /* No reloc */
++#define R_X86_64_64 1 /* Direct 64 bit */
++#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
++#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
++#define R_X86_64_PLT32 4 /* 32 bit PLT address */
++#define R_X86_64_COPY 5 /* Copy symbol at runtime */
++#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
++#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
++#define R_X86_64_RELATIVE 8 /* Adjust by program base */
++#define R_X86_64_GOTPCREL 9 /* 32 bit signed PC relative
++ offset to GOT */
++#define R_X86_64_32 10 /* Direct 32 bit zero extended */
++#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
++#define R_X86_64_16 12 /* Direct 16 bit zero extended */
++#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
++#define R_X86_64_8 14 /* Direct 8 bit sign extended */
++#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
++#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
++#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */
++#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */
++#define R_X86_64_TLSGD 19 /* 32 bit signed PC relative offset
++ to two GOT entries for GD symbol */
++#define R_X86_64_TLSLD 20 /* 32 bit signed PC relative offset
++ to two GOT entries for LD symbol */
++#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
++#define R_X86_64_GOTTPOFF 22 /* 32 bit signed PC relative offset
++ to GOT entry for IE symbol */
++#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */
++
++#define R_X86_64_NUM 24
++
++__END_DECLS
++
++#endif /* elf.h */
+
+ #include "elfconfig.h"
+
+@@ -185,3 +2631,4 @@
+ void fatal(const char *fmt, ...);
+ void warn(const char *fmt, ...);
+ void merror(const char *fmt, ...);
++
+diff -Nur linux-3.11.5.orig/scripts/mod/sumversion.c linux-3.11.5/scripts/mod/sumversion.c
+--- linux-3.11.5.orig/scripts/mod/sumversion.c 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/mod/sumversion.c 2013-10-16 18:09:31.000000000 +0200
+@@ -1,4 +1,4 @@
+-#include <netinet/in.h>
++/* #include <netinet/in.h> */
+ #ifdef __sun__
+ #include <inttypes.h>
+ #else
+diff -Nur linux-3.11.5.orig/tools/include/tools/linux_types.h linux-3.11.5/tools/include/tools/linux_types.h
+--- linux-3.11.5.orig/tools/include/tools/linux_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.11.5/tools/include/tools/linux_types.h 2013-10-16 18:09:31.000000000 +0200
+@@ -0,0 +1,22 @@
++#ifndef __LINUX_TYPES_H
++#define __LINUX_TYPES_H
++
++#include <stdint.h>
++
++typedef uint8_t __u8;
++typedef uint8_t __be8;
++typedef uint8_t __le8;
++
++typedef uint16_t __u16;
++typedef uint16_t __be16;
++typedef uint16_t __le16;
++
++typedef uint32_t __u32;
++typedef uint32_t __be32;
++typedef uint32_t __le32;
++
++typedef uint64_t __u64;
++typedef uint64_t __be64;
++typedef uint64_t __le64;
++
++#endif
diff --git a/target/linux/patches/3.18.9/cleankernel.patch b/target/linux/patches/3.18.9/cleankernel.patch
new file mode 100644
index 000000000..d8c055dc3
--- /dev/null
+++ b/target/linux/patches/3.18.9/cleankernel.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.5.orig/scripts/Makefile.headersinst linux-3.11.5/scripts/Makefile.headersinst
+--- linux-3.11.5.orig/scripts/Makefile.headersinst 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/Makefile.headersinst 2013-10-15 16:33:10.000000000 +0200
+@@ -107,7 +107,6 @@
+
+ targets += $(install-file)
+ $(install-file): scripts/headers_install.sh $(input-files1) $(input-files2) $(input-files3) FORCE
+- $(if $(unwanted),$(call cmd,remove),)
+ $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@)))
+ $(call if_changed,install)
+
diff --git a/target/linux/patches/3.18.9/cris-header.patch b/target/linux/patches/3.18.9/cris-header.patch
new file mode 100644
index 000000000..3db07e530
--- /dev/null
+++ b/target/linux/patches/3.18.9/cris-header.patch
@@ -0,0 +1,50 @@
+diff -Nur linux-3.16.2.orig/arch/cris/include/arch-v10/arch/Kbuild linux-3.16.2/arch/cris/include/arch-v10/arch/Kbuild
+--- linux-3.16.2.orig/arch/cris/include/arch-v10/arch/Kbuild 2014-09-06 01:37:11.000000000 +0200
++++ linux-3.16.2/arch/cris/include/arch-v10/arch/Kbuild 2014-09-26 19:24:50.000000000 +0200
+@@ -1 +1,2 @@
+ # CRISv10 arch
++header-y += ptrace.h
+diff -Nur linux-3.16.2.orig/arch/cris/include/arch-v32/arch/Kbuild linux-3.16.2/arch/cris/include/arch-v32/arch/Kbuild
+--- linux-3.16.2.orig/arch/cris/include/arch-v32/arch/Kbuild 2014-09-06 01:37:11.000000000 +0200
++++ linux-3.16.2/arch/cris/include/arch-v32/arch/Kbuild 2014-09-26 19:24:31.000000000 +0200
+@@ -1 +1,2 @@
+ # CRISv32 arch
++header-y += ptrace.h
+diff -Nur linux-3.16.2.orig/arch/cris/include/asm/Kbuild linux-3.16.2/arch/cris/include/asm/Kbuild
+--- linux-3.16.2.orig/arch/cris/include/asm/Kbuild 2014-09-06 01:37:11.000000000 +0200
++++ linux-3.16.2/arch/cris/include/asm/Kbuild 2014-09-26 19:24:31.000000000 +0200
+@@ -1,8 +1,3 @@
+-
+-header-y += arch-v10/
+-header-y += arch-v32/
+-
+-
+ generic-y += barrier.h
+ generic-y += clkdev.h
+ generic-y += cputime.h
+diff -Nur linux-3.16.2.orig/arch/cris/include/uapi/asm/Kbuild linux-3.16.2/arch/cris/include/uapi/asm/Kbuild
+--- linux-3.16.2.orig/arch/cris/include/uapi/asm/Kbuild 2014-09-06 01:37:11.000000000 +0200
++++ linux-3.16.2/arch/cris/include/uapi/asm/Kbuild 2014-09-26 19:24:31.000000000 +0200
+@@ -1,8 +1,8 @@
+ # UAPI Header export list
+ include include/uapi/asm-generic/Kbuild.asm
+
+-header-y += arch-v10/
+-header-y += arch-v32/
++header-y += ../arch-v10/arch/
++header-y += ../arch-v32/arch/
+ header-y += auxvec.h
+ header-y += bitsperlong.h
+ header-y += byteorder.h
+diff -Nur linux-3.16.2.orig/scripts/headers.sh linux-3.16.2/scripts/headers.sh
+--- linux-3.16.2.orig/scripts/headers.sh 2014-09-06 01:37:11.000000000 +0200
++++ linux-3.16.2/scripts/headers.sh 2014-09-26 19:24:31.000000000 +0200
+@@ -19,8 +19,6 @@
+ case ${arch} in
+ um) # no userspace export
+ ;;
+- cris) # headers export are known broken
+- ;;
+ *)
+ if [ -d ${srctree}/arch/${arch} ]; then
+ do_command $1 ${arch}
diff --git a/target/linux/patches/3.18.9/defaults.patch b/target/linux/patches/3.18.9/defaults.patch
new file mode 100644
index 000000000..6cdca084e
--- /dev/null
+++ b/target/linux/patches/3.18.9/defaults.patch
@@ -0,0 +1,46 @@
+diff -Nur linux-3.0.4.orig/fs/Kconfig linux-3.0.4/fs/Kconfig
+--- linux-3.0.4.orig/fs/Kconfig 2011-08-29 22:56:30.000000000 +0200
++++ linux-3.0.4/fs/Kconfig 2011-10-15 22:08:44.000000000 +0200
+@@ -47,7 +47,7 @@
+ def_bool n
+
+ config EXPORTFS
+- tristate
++ def_bool y
+
+ config FILE_LOCKING
+ bool "Enable POSIX file locking API" if EXPERT
+diff -Nur linux-3.0.4.orig/fs/notify/Kconfig linux-3.0.4/fs/notify/Kconfig
+--- linux-3.0.4.orig/fs/notify/Kconfig 2011-08-29 22:56:30.000000000 +0200
++++ linux-3.0.4/fs/notify/Kconfig 2011-10-15 22:02:00.000000000 +0200
+@@ -1,5 +1,5 @@
+ config FSNOTIFY
+- def_bool n
++ def_bool y
+
+ source "fs/notify/dnotify/Kconfig"
+ source "fs/notify/inotify/Kconfig"
+diff -Nur linux-3.11.10.orig/drivers/scsi/Kconfig linux-3.11.10/drivers/scsi/Kconfig
+--- linux-3.11.10.orig/drivers/scsi/Kconfig 2013-11-29 19:42:37.000000000 +0100
++++ linux-3.11.10/drivers/scsi/Kconfig 2013-12-27 19:13:21.000000000 +0100
+@@ -2,7 +2,7 @@
+
+ config SCSI_MOD
+ tristate
+- default y if SCSI=n || SCSI=y
++ default y if SCSI=y
+ default m if SCSI=m
+
+ config RAID_ATTRS
+diff -Nur linux-3.11.10.orig/usr/Kconfig linux-3.11.10/usr/Kconfig
+--- linux-3.11.10.orig/usr/Kconfig 2013-11-29 19:42:37.000000000 +0100
++++ linux-3.11.10/usr/Kconfig 2013-12-27 19:15:16.000000000 +0100
+@@ -47,7 +47,7 @@
+
+ config RD_GZIP
+ bool "Support initial ramdisks compressed using gzip" if EXPERT
+- default y
++ default n
+ depends on BLK_DEV_INITRD
+ select DECOMPRESS_GZIP
+ help
diff --git a/target/linux/patches/3.18.9/export-symbol-for-exmap.patch b/target/linux/patches/3.18.9/export-symbol-for-exmap.patch
new file mode 100644
index 000000000..4f0fc8449
--- /dev/null
+++ b/target/linux/patches/3.18.9/export-symbol-for-exmap.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.5.orig/kernel/pid.c linux-3.11.5/kernel/pid.c
+--- linux-3.11.5.orig/kernel/pid.c 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/kernel/pid.c 2013-10-29 15:37:02.000000000 +0100
+@@ -450,6 +450,7 @@
+ {
+ return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
+ }
++EXPORT_SYMBOL(find_task_by_vpid);
+
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
diff --git a/target/linux/patches/3.18.9/gemalto.patch b/target/linux/patches/3.18.9/gemalto.patch
new file mode 100644
index 000000000..65f7af1d7
--- /dev/null
+++ b/target/linux/patches/3.18.9/gemalto.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-2.6.36.orig/drivers/tty/serial/8250/serial_cs.c linux-2.6.36/drivers/tty/serial/8250/serial_cs.c
+--- linux-2.6.36.orig/drivers/tty/serial/8250/serial_cs.c 2010-10-20 22:30:22.000000000 +0200
++++ linux-2.6.36/drivers/tty/serial/8250/serial_cs.c 2010-12-13 23:03:40.000000000 +0100
+@@ -794,6 +794,7 @@
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0025),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0045),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0052),
++ PCMCIA_DEVICE_MANF_CARD(0x0157, 0x0100), /* Gemalto SCR */
+ PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0006), /* Psion 56K+Fax */
+ PCMCIA_DEVICE_MANF_CARD(0x0200, 0x0001), /* MultiMobile */
+ PCMCIA_DEVICE_PROD_ID134("ADV", "TECH", "COMpad-32/85", 0x67459937, 0x916d02ba, 0x8fbe92ae),
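The added ID pairs manufacturer 0x0157 with card 0x0100 so the PCMCIA core binds serial_cs to the Gemalto reader. PCMCIA_DEVICE_MANF_CARD() expands to roughly the following match entry (paraphrased from the kernel's device-ID macros, not a verbatim expansion):

    #include <linux/mod_devicetable.h>
    #include <pcmcia/device_id.h>

    /* Approximate expansion of PCMCIA_DEVICE_MANF_CARD(0x0157, 0x0100):
     * match on both the manufacturer ID and the card ID from the CIS. */
    static const struct pcmcia_device_id gemalto_id = {
            .match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID |
                           PCMCIA_DEV_ID_MATCH_CARD_ID,
            .manf_id = 0x0157,
            .card_id = 0x0100,
    };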
diff --git a/target/linux/patches/3.18.9/initramfs-nosizelimit.patch b/target/linux/patches/3.18.9/initramfs-nosizelimit.patch
new file mode 100644
index 000000000..40d2f6bd8
--- /dev/null
+++ b/target/linux/patches/3.18.9/initramfs-nosizelimit.patch
@@ -0,0 +1,57 @@
+From 9a18df7a71bfa620b1278777d64783a359d7eb4e Mon Sep 17 00:00:00 2001
+From: Thorsten Glaser <tg@mirbsd.org>
+Date: Sun, 4 May 2014 01:37:54 +0200
+Subject: [PATCH] mount tmpfs-as-rootfs (initramfs) with -o
+ nr_blocks=0,nr_inodes=0
+
+I would have preferred to write this patch to be able to pass
+rootflags=nr_blocks=0,nr_inodes=0 on the kernel command line,
+and then hand these rootflags over to the initramfs (tmpfs)
+mount in the same way the kernel hands them over to the block
+device rootfs mount. But at least the Debian/m68k initrd also
+parses $rootflags from the environment and adds it to the call
+to the user-space mount for the eventual root device, which
+would make the kernel command line rootflags option be used in
+both places (tmpfs and e.g. ext4) which is guaranteed to error
+out in at least one of them.
+
+This change is intended to aid people in a setup where the
+initrd is the final root filesystem, i.e. not mounted over.
+This is especially useful in automated tests running on qemu
+for boards with constrained memory (e.g. 64 MiB on sh4).
+
+Considering that the initramfs is normally emptied out then
+overmounted, this change is probably safe for setups where
+initramfs just hosts early userspace, too, since the tmpfs
+backing it is not accessible any more later on, AFAICT.
+
+Signed-off-by: Thorsten Glaser <tg@mirbsd.org>
+---
+ init/do_mounts.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 82f2288..55a4cfe 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -594,6 +594,7 @@ out:
+ }
+
+ static bool is_tmpfs;
++static char tmpfs_rootflags[] = "nr_blocks=0,nr_inodes=0";
+ static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+@@ -606,6 +607,9 @@ static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+ if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs)
+ fill = shmem_fill_super;
+
++ if (is_tmpfs)
++ data = tmpfs_rootflags;
++
+ return mount_nodev(fs_type, flags, data, fill);
+ }
+
+--
+2.0.0.rc0
+
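For tmpfs, nr_blocks=0 and nr_inodes=0 mean "no limit", so the patch lifts the default 50%-of-RAM cap from the rootfs mount. The userspace equivalent of the options being hardwired, as a sketch (mount point and error handling illustrative):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Same option string the patch passes to mount_nodev() above. */
            if (mount("tmpfs", "/mnt", "tmpfs", 0,
                      "nr_blocks=0,nr_inodes=0") != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }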
diff --git a/target/linux/patches/3.18.9/lemote-rfkill.patch b/target/linux/patches/3.18.9/lemote-rfkill.patch
new file mode 100644
index 000000000..a61488434
--- /dev/null
+++ b/target/linux/patches/3.18.9/lemote-rfkill.patch
@@ -0,0 +1,21 @@
+diff -Nur linux-3.3.orig/drivers/net/wireless/rtl818x/rtl8187/rfkill.c linux-3.3/drivers/net/wireless/rtl818x/rtl8187/rfkill.c
+--- linux-3.3.orig/drivers/net/wireless/rtl818x/rtl8187/rfkill.c 2012-03-19 00:15:34.000000000 +0100
++++ linux-3.3/drivers/net/wireless/rtl818x/rtl8187/rfkill.c 2012-03-27 23:29:46.000000000 +0200
+@@ -22,6 +22,9 @@
+
+ static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
+ {
++#ifdef CONFIG_LEMOTE_MACH2F
++ return 1;
++#else
+ u8 gpio;
+
+ gpio = rtl818x_ioread8(priv, &priv->map->GPIO0);
+@@ -29,6 +32,7 @@
+ gpio = rtl818x_ioread8(priv, &priv->map->GPIO1);
+
+ return gpio & priv->rfkill_mask;
++#endif
+ }
+
+ void rtl8187_rfkill_init(struct ieee80211_hw *hw)
diff --git a/target/linux/patches/3.18.9/microblaze-ethernet.patch b/target/linux/patches/3.18.9/microblaze-ethernet.patch
new file mode 100644
index 000000000..742ab477e
--- /dev/null
+++ b/target/linux/patches/3.18.9/microblaze-ethernet.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.10.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c linux-3.11.10/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+--- linux-3.11.10.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2013-11-29 19:42:37.000000000 +0100
++++ linux-3.11.10/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2013-12-23 20:01:14.000000000 +0100
+@@ -1282,6 +1282,7 @@
+ { .compatible = "xlnx,opb-ethernetlite-1.01.b", },
+ { .compatible = "xlnx,xps-ethernetlite-1.00.a", },
+ { .compatible = "xlnx,xps-ethernetlite-2.00.a", },
++ { .compatible = "xlnx,xps-ethernetlite-2.00.b", },
+ { .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
+ { /* end of list */ },
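The one-line addition lets emaclite bind to device-tree nodes claiming the 2.00.b core revision. For illustration, this is how OF matching code consults such a table (helper name hypothetical):

    #include <linux/of.h>

    static const struct of_device_id demo_match[] = {
            { .compatible = "xlnx,xps-ethernetlite-2.00.b", },
            { /* sentinel */ },
    };

    /* True if the node's "compatible" list names the 2.00.b core. */
    static bool node_is_emaclite_200b(struct device_node *np)
    {
            return of_match_node(demo_match, np) != NULL;
    }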
diff --git a/target/linux/patches/3.18.9/mkpiggy.patch b/target/linux/patches/3.18.9/mkpiggy.patch
new file mode 100644
index 000000000..751678b74
--- /dev/null
+++ b/target/linux/patches/3.18.9/mkpiggy.patch
@@ -0,0 +1,28 @@
+diff -Nur linux-3.13.3.orig/arch/x86/boot/compressed/mkpiggy.c linux-3.13.3/arch/x86/boot/compressed/mkpiggy.c
+--- linux-3.13.3.orig/arch/x86/boot/compressed/mkpiggy.c 2014-02-13 23:00:14.000000000 +0100
++++ linux-3.13.3/arch/x86/boot/compressed/mkpiggy.c 2014-02-17 11:09:06.000000000 +0100
+@@ -29,7 +29,14 @@
+ #include <stdio.h>
+ #include <string.h>
+ #include <inttypes.h>
+-#include <tools/le_byteshift.h>
++
++static uint32_t getle32(const void *p)
++{
++ const uint8_t *cp = p;
++
++ return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
++ ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
++}
+
+ int main(int argc, char *argv[])
+ {
+@@ -63,7 +70,7 @@
+ }
+
+ ilen = ftell(f);
+- olen = get_unaligned_le32(&olen);
++ olen = getle32(&olen);
+
+ /*
+ * Now we have the input (compressed) and output (uncompressed)
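The replacement getle32() decodes four bytes with little-endian arithmetic instead of pulling in the kernel-internal <tools/le_byteshift.h>, presumably so the host tool also builds where that header is unavailable. A quick host-side self-check of the decoder:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t getle32(const void *p)
    {
            const uint8_t *cp = p;

            return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
                   ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
    }

    int main(void)
    {
            const uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };

            /* The result is independent of the host's byte order. */
            assert(getle32(buf) == 0x12345678);
            return 0;
    }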
diff --git a/target/linux/patches/3.18.9/mtd-rootfs.patch b/target/linux/patches/3.18.9/mtd-rootfs.patch
new file mode 100644
index 000000000..775d5fc80
--- /dev/null
+++ b/target/linux/patches/3.18.9/mtd-rootfs.patch
@@ -0,0 +1,26 @@
+diff -Nur linux-3.5.orig//drivers/mtd/mtdpart.c linux-3.5/drivers/mtd/mtdpart.c
+--- linux-3.5.orig//drivers/mtd/mtdpart.c 2012-07-21 22:58:29.000000000 +0200
++++ linux-3.5/drivers/mtd/mtdpart.c 2012-07-31 23:59:07.000000000 +0200
+@@ -30,6 +30,7 @@
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/err.h>
++#include <linux/root_dev.h>
+
+ #include "mtdcore.h"
+
+@@ -637,6 +638,14 @@
+ if (IS_ERR(slave))
+ return PTR_ERR(slave);
+
++ if (strcmp(parts[i].name, "rootfs") == 0) {
++ if (ROOT_DEV == 0) {
++ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
++ "set to be root filesystem\n");
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, i);
++ }
++ }
++
+ mutex_lock(&mtd_partitions_mutex);
+ list_add(&slave->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
diff --git a/target/linux/patches/3.18.9/nfsv3-tcp.patch b/target/linux/patches/3.18.9/nfsv3-tcp.patch
new file mode 100644
index 000000000..d5e07e1c2
--- /dev/null
+++ b/target/linux/patches/3.18.9/nfsv3-tcp.patch
@@ -0,0 +1,12 @@
+diff -Nur linux-3.15-rc5.orig/fs/nfs/nfsroot.c linux-3.15-rc5/fs/nfs/nfsroot.c
+--- linux-3.15-rc5.orig/fs/nfs/nfsroot.c 2014-05-09 22:10:52.000000000 +0200
++++ linux-3.15-rc5/fs/nfs/nfsroot.c 2014-05-16 15:45:38.000000000 +0200
+@@ -87,7 +87,7 @@
+ #define NFS_ROOT "/tftpboot/%s"
+
+ /* Default NFSROOT mount options. */
+-#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096"
++#define NFS_DEF_OPTIONS "nfsvers=3,proto=tcp,rsize=4096,wsize=4096"
+
+ /* Parameters passed from the kernel command line */
+ static char nfs_root_parms[256] __initdata = "";
diff --git a/target/linux/patches/3.18.9/non-static.patch b/target/linux/patches/3.18.9/non-static.patch
new file mode 100644
index 000000000..a967703d0
--- /dev/null
+++ b/target/linux/patches/3.18.9/non-static.patch
@@ -0,0 +1,33 @@
+diff -Nur linux-2.6.39-rc6.orig/fs/namei.c linux-2.6.39-rc6/fs/namei.c
+--- linux-2.6.39-rc6.orig/fs/namei.c 2011-05-04 04:59:13.000000000 +0200
++++ linux-2.6.39-rc6/fs/namei.c 2011-05-05 11:30:14.000000000 +0200
+@@ -1769,7 +1769,7 @@
+ * needs parent already locked. Doesn't follow mounts.
+ * SMP-safe.
+ */
+-static struct dentry *lookup_hash(struct nameidata *nd)
++struct dentry *lookup_hash(struct nameidata *nd)
+ {
+ return __lookup_hash(&nd->last, nd->path.dentry, nd);
+ }
+diff -Nur linux-2.6.39-rc6.orig/fs/splice.c linux-2.6.39-rc6/fs/splice.c
+--- linux-2.6.39-rc6.orig/fs/splice.c 2011-05-04 04:59:13.000000000 +0200
++++ linux-2.6.39-rc6/fs/splice.c 2011-05-05 11:31:04.000000000 +0200
+@@ -1081,7 +1081,7 @@
+ /*
+ * Attempt to initiate a splice from pipe to file.
+ */
+-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
++long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+ {
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+@@ -1109,7 +1109,7 @@
+ /*
+ * Attempt to initiate a splice from a file to a pipe.
+ */
+-static long do_splice_to(struct file *in, loff_t *ppos,
++long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+ {
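Dropping the static qualifier gives lookup_hash(), do_splice_from() and do_splice_to() external linkage, so other built-in code can reach them through an extern prototype; since no EXPORT_SYMBOL is added, loadable modules still cannot. A sketch of a built-in caller (prototype assumed to match the definition above):

    #include <linux/fs.h>
    #include <linux/splice.h>

    extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
                               loff_t *ppos, size_t len, unsigned int flags);

    /* Illustrative built-in helper draining a pipe into a file. */
    static long copy_pipe_to_file(struct pipe_inode_info *pipe,
                                  struct file *out, loff_t *pos, size_t len)
    {
            return do_splice_from(pipe, out, pos, len, 0);
    }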
diff --git a/target/linux/patches/3.18.9/patch-fblogo b/target/linux/patches/3.18.9/patch-fblogo
new file mode 100644
index 000000000..f1fad64c1
--- /dev/null
+++ b/target/linux/patches/3.18.9/patch-fblogo
@@ -0,0 +1,2057 @@
+diff -Nur linux-3.15.1.orig/Documentation/fb/00-INDEX linux-3.15.1/Documentation/fb/00-INDEX
+--- linux-3.15.1.orig/Documentation/fb/00-INDEX 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/Documentation/fb/00-INDEX 2014-06-28 11:21:19.000000000 +0200
+@@ -23,6 +23,8 @@
+ - info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ - intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++ - info on the Framebuffer Console Decoration
+ framebuffer.txt
+ - introduction to frame buffer devices.
+ gxfb.txt
+diff -Nur linux-3.15.1.orig/Documentation/fb/fbcondecor.txt linux-3.15.1/Documentation/fb/fbcondecor.txt
+--- linux-3.15.1.orig/Documentation/fb/fbcondecor.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15.1/Documentation/fb/fbcondecor.txt 2014-06-28 11:21:19.000000000 +0200
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++ http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set the path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++ standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem
++ is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->kernel.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes:
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in an 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (the first
++16 colors are reserved for fbcon), len to a value <= 240, and red, green and
++blue should point to valid cmap data. The transp field is ignored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When an FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc:
++Virtual console number.
++
++origin:
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data:
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++ Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++ Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++ Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++ Michal Januszewski <michalj+fbcondecor@gmail.com>
++
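Putting the "Userspace -> Kernel" description above into code: a sketch of a configuration tool querying the decor state of console 1. FBIOCONDECOR_GETSTATE, FBCON_DECOR_IO_ORIG_USER and struct fbcon_decor_iowrapper are taken on trust from the patched linux/fb.h; the device node name follows the misc device registered later in this patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fb.h>

    int main(void)
    {
            unsigned int state = 0;
            struct fbcon_decor_iowrapper wrapper = {
                    .vc = 1,                            /* virtual console */
                    .origin = FBCON_DECOR_IO_ORIG_USER, /* plain userspace tool */
                    .data = &state,
            };
            int fd = open("/dev/fbcondecor", O_RDWR);

            if (fd < 0 || ioctl(fd, FBIOCONDECOR_GETSTATE, &wrapper) < 0) {
                    perror("fbcondecor");
                    return 1;
            }
            printf("decor on vc1: %s\n", state ? "enabled" : "disabled");
            close(fd);
            return 0;
    }

In the opposite direction, fbcon_decor_call_helper() further down in this patch execs the helper as "/sbin/fbcondecor_helper 2 <command> <console> <framebuffer> <theme>", matching the argument pattern documented above.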
+diff -Nur linux-3.15.1.orig/drivers/Makefile linux-3.15.1/drivers/Makefile
+--- linux-3.15.1.orig/drivers/Makefile 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/drivers/Makefile 2014-06-28 11:21:19.000000000 +0200
+@@ -17,6 +17,10 @@
+ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y += tty/
++obj-y += char/
+ obj-y += video/
+ obj-y += idle/
+
+@@ -42,11 +46,6 @@
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER) += reset/
+
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y += tty/
+-obj-y += char/
+-
+ # gpu/ comes after char for AGP vs DRM startup
+ obj-y += gpu/
+
+diff -Nur linux-3.15.1.orig/drivers/video/console/Kconfig linux-3.15.1/drivers/video/console/Kconfig
+--- linux-3.15.1.orig/drivers/video/console/Kconfig 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/drivers/video/console/Kconfig 2014-06-28 11:21:19.000000000 +0200
+@@ -126,6 +126,19 @@
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config FB_CON_DECOR
++ bool "Support for the Framebuffer Console Decorations"
++ depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++ default n
++ ---help---
++ This option enables support for framebuffer console decorations which
++ makes it possible to display images in the background of the system
++ consoles. Note that userspace utilities are necessary in order to take
++ advantage of these features. Refer to Documentation/fb/fbcondecor.txt
++ for more information.
++
++ If unsure, say N.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+diff -Nur linux-3.15.1.orig/drivers/video/console/Makefile linux-3.15.1/drivers/video/console/Makefile
+--- linux-3.15.1.orig/drivers/video/console/Makefile 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/drivers/video/console/Makefile 2014-06-28 11:21:19.000000000 +0200
+@@ -16,4 +16,5 @@
+ fbcon_ccw.o
+ endif
+
++obj-$(CONFIG_FB_CON_DECOR) += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI) += sticore.o
+diff -Nur linux-3.15.1.orig/drivers/video/console/bitblit.c linux-3.15.1/drivers/video/console/bitblit.c
+--- linux-3.15.1.orig/drivers/video/console/bitblit.c 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/drivers/video/console/bitblit.c 2014-06-28 11:21:19.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ /*
+ * Accelerated handlers.
+@@ -55,6 +56,13 @@
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
++ if (fbcon_decor_active(info, vc)) {
++ area.sx += vc->vc_decor.tx;
++ area.sy += vc->vc_decor.ty;
++ area.dx += vc->vc_decor.tx;
++ area.dy += vc->vc_decor.ty;
++ }
++
+ info->fbops->fb_copyarea(info, &area);
+ }
+
+@@ -380,11 +388,15 @@
+ cursor.image.depth = 1;
+ cursor.rop = ROP_XOR;
+
+- if (info->fbops->fb_cursor)
+- err = info->fbops->fb_cursor(info, &cursor);
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_cursor(info, &cursor);
++ } else {
++ if (info->fbops->fb_cursor)
++ err = info->fbops->fb_cursor(info, &cursor);
+
+- if (err)
+- soft_cursor(info, &cursor);
++ if (err)
++ soft_cursor(info, &cursor);
++ }
+
+ ops->cursor_reset = 0;
+ }
+diff -Nur linux-3.15.1.orig/drivers/video/console/cfbcondecor.c linux-3.15.1/drivers/video/console/cfbcondecor.c
+--- linux-3.15.1.orig/drivers/video/console/cfbcondecor.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15.1/drivers/video/console/cfbcondecor.c 2014-06-28 11:21:19.000000000 +0200
+@@ -0,0 +1,471 @@
++/*
++ * linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ * Code based upon "Bootdecor" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type) \
++ do { \
++ if (d & (0x80 >> (shift))) \
++ dd2[(shift)] = fgx; \
++ else \
++ dd2[(shift)] = transparent ? *(type *)decor_src : bgx; \
++ decor_src += (bpp); \
++ } while (0) \
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++ u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++ int i, j, k;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ u32 col;
++
++ for (j = i = 0; i < 16; i++) {
++ k = color_table[i];
++
++ col = ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.red.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.green.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.blue.offset);
++ ((u32 *)info->pseudo_palette)[k] = col;
++ }
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++ int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++ unsigned int x, y;
++ u32 dd;
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++ unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++ u16 dd2[4];
++
++ u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++ u8* dst = (u8 *)(info->screen_base + d);
++
++ if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++ return;
++
++ for (y = 0; y < height; y++) {
++ switch (info->var.bits_per_pixel) {
++
++ case 32:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ *(u32 *)decor_src : bgx;
++
++ d <<= 1;
++ decor_src += 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++ case 24:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ (*(u32 *)decor_src & 0xffffff) : bgx;
++
++ d <<= 1;
++ decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++ fb_writew(dd & 0xffff, dst);
++ dst += 2;
++ fb_writeb((dd >> 16), dst);
++#else
++ fb_writew(dd >> 8, dst);
++ dst += 2;
++ fb_writeb(dd & 0xff, dst);
++#endif
++ dst++;
++ }
++ break;
++ case 16:
++ for (x = 0; x < width; x += 2) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 2, u16);
++ parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 16);
++#else
++ dd = dd2[1] | (dd2[0] << 16);
++#endif
++ d <<= 2;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++
++ case 8:
++ for (x = 0; x < width; x += 4) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 1, u8);
++ parse_pixel(1, 1, u8);
++ parse_pixel(2, 1, u8);
++ parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++ dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++ d <<= 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ }
++
++ dst += info->fix.line_length - width * bytespp;
++ decor_src += (info->var.xres - width) * bytespp;
++ }
++}
++
++#define cc2cx(a) \
++ ((info->fix.visual == FB_VISUAL_TRUECOLOR || \
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? \
++ ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++ const unsigned short *s, int count, int yy, int xx)
++{
++ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++ struct fbcon_ops *ops = info->fbcon_par;
++ int fg_color, bg_color, transparent;
++ u8 *src;
++ u32 bgx, fgx;
++ u16 c = scr_readw(s);
++
++ fg_color = get_color(vc, info, c, 1);
++ bg_color = get_color(vc, info, c, 0);
++
++ /* Don't paint the background image if console is blanked */
++ transparent = ops->blank_state ? 0 :
++ (vc->vc_decor.bg_color == bg_color);
++
++ xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++ yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++ fgx = cc2cx(fg_color);
++ bgx = cc2cx(bg_color);
++
++ while (count--) {
++ c = scr_readw(s++);
++ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++ ((vc->vc_font.width + 7) >> 3);
++
++ fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++ vc->vc_font.width, src, fgx, bgx, transparent);
++ xx += vc->vc_font.width;
++ }
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++ int i;
++ unsigned int dsize, s_pitch;
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct vc_data* vc;
++ u8 *src;
++
++ /* we really don't need any cursors while the console is blanked */
++ if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++ return;
++
++ vc = vc_cons[ops->currcon].d;
++
++ src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++ if (!src)
++ return;
++
++ s_pitch = (cursor->image.width + 7) >> 3;
++ dsize = s_pitch * cursor->image.height;
++ if (cursor->enable) {
++ switch (cursor->rop) {
++ case ROP_XOR:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] ^ cursor->mask[i];
++ break;
++ case ROP_COPY:
++ default:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] & cursor->mask[i];
++ break;
++ }
++ } else
++ memcpy(src, cursor->image.data, dsize);
++
++ fbcon_decor_renderc(info,
++ cursor->image.dy + vc->vc_decor.ty,
++ cursor->image.dx + vc->vc_decor.tx,
++ cursor->image.height,
++ cursor->image.width,
++ (u8*)src,
++ cc2cx(cursor->image.fg_color),
++ cc2cx(cursor->image.bg_color),
++ cursor->image.bg_color == vc->vc_decor.bg_color);
++
++ kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++ u32 bgx, int bpp)
++{
++ int i;
++
++ if (bpp == 8)
++ bgx |= bgx << 8;
++ if (bpp == 16 || bpp == 8)
++ bgx |= bgx << 16;
++
++ while (height-- > 0) {
++ u8 *p = dst;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++) {
++ fb_writel(bgx, p); p += 4;
++ }
++ break;
++ case 24:
++ for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++ fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++ fb_writeb((bgx >> 16),p++);
++#else
++ fb_writew((bgx >> 8),(u16*)p); p += 2;
++ fb_writeb((bgx & 0xff),p++);
++#endif
++	}
++	break;
++	case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 2) {
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 1)
++ fb_writew(bgx,(u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ }
++
++ if (width & 2) {
++ fb_writew(bgx,p); p += 2;
++ }
++ if (width & 1)
++ fb_writeb(bgx,(u8*)p);
++ break;
++
++ }
++ dst += dstbytes;
++ }
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++ int srclinebytes, int bpp)
++{
++ int i;
++
++ while (height-- > 0) {
++ u32 *p = (u32 *)dst;
++ u32 *q = (u32 *)src;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++)
++ fb_writel(*q++, p++);
++ break;
++ case 24:
++ for (i=0; i < (width*3/4); i++)
++ fb_writel(*q++, p++);
++ if ((width*3) % 4) {
++ if (width & 2) {
++ fb_writeb(*(u8*)q, (u8*)p);
++ } else if (width & 1) {
++ fb_writew(*(u16*)q, (u16*)p);
++ fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++ }
++ }
++ break;
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(*q++, p++);
++ fb_writel(*q++, p++);
++ }
++ if (width & 2)
++ fb_writel(*q++, p++);
++ if (width & 1)
++ fb_writew(*(u16*)q, (u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++)
++ fb_writel(*q++, p++);
++
++ if (width & 2) {
++ fb_writew(*(u16*)q, (u16*)p);
++ q = (u32*) ((u16*)q + 1);
++ p = (u32*) ((u16*)p + 1);
++ }
++ if (width & 1)
++ fb_writeb(*(u8*)q, (u8*)p);
++ break;
++ }
++
++ dst += linebytes;
++ src += srclinebytes;
++ }
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++ int width)
++{
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ int d = sy * info->fix.line_length + sx * bytespp;
++ int ds = (sy * info->var.xres + sx) * bytespp;
++
++ fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++ height, width, info->fix.line_length, info->var.xres * bytespp,
++ info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++ int height, int width)
++{
++ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++ struct fbcon_ops *ops = info->fbcon_par;
++ u8 *dst;
++ int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++ transparent = (vc->vc_decor.bg_color == bg_color);
++ sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++ sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++ height *= vc->vc_font.height;
++ width *= vc->vc_font.width;
++
++ /* Don't paint the background image if console is blanked */
++ if (transparent && !ops->blank_state) {
++ decorfill(info, sy, sx, height, width);
++ } else {
++ dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++ sx * ((info->var.bits_per_pixel + 7) >> 3));
++ decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++ info->var.bits_per_pixel);
++ }
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++ int bottom_only)
++{
++ unsigned int tw = vc->vc_cols*vc->vc_font.width;
++ unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++ if (!bottom_only) {
++ /* top margin */
++ decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++ /* left margin */
++ decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++ /* right margin */
++ decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
++ info->var.xres - vc->vc_decor.tx - tw);
++ }
++ decorfill(info, vc->vc_decor.ty + th, 0,
++ info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
++ int sx, int dx, int width)
++{
++ u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++ u16 *s = d + (dx - sx);
++ u16 *start = d;
++ u16 *ls = d;
++ u16 *le = d + width;
++ u16 c;
++ int x = dx;
++ u16 attr = 1;
++
++ do {
++ c = scr_readw(d);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start;
++ start = d;
++ }
++ }
++ if (s >= ls && s < le && c == scr_readw(s)) {
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start + 1;
++ start = d + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (d < le);
++ if (d > start)
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++ if (blank) {
++ decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++ info->fix.line_length, 0, info->var.bits_per_pixel);
++ } else {
++ update_screen(vc);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++}
++
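fbcon_decor_renderc() above expands each bit of a font scanline, MSB first, into either the foreground color or a background/decor pixel; parse_pixel() applies the same mask idiom for the packed 8/16 bpp paths. The selection idiom in isolation, as a host-side sketch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t scanline = 0xC3;        /* glyph row: 11000011 */
            const uint32_t fgx = 0xFFFFFF, bgx = 0x000000;
            int shift;

            /* MSB-first: bit 7 is the leftmost pixel of the row. */
            for (shift = 0; shift < 8; shift++) {
                    uint32_t px = (scanline & (0x80 >> shift)) ? fgx : bgx;
                    printf("pixel %d -> %06" PRIX32 "\n", shift, px);
            }
            return 0;
    }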
+diff -Nur linux-3.15.1.orig/drivers/video/console/fbcon.c linux-3.15.1/drivers/video/console/fbcon.c
+--- linux-3.15.1.orig/drivers/video/console/fbcon.c 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/drivers/video/console/fbcon.c 2014-06-28 11:21:19.000000000 +0200
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ #ifdef FBCONDEBUG
+ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@
+
+ static struct display fb_display[MAX_NR_CONSOLES];
+
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+
+ static int logo_lines;
+@@ -286,7 +287,7 @@
+ !vt_force_oops_output(vc);
+ }
+
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ u16 c, int is_fg)
+ {
+ int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++ fbcon_decor_init();
++#endif
+ }
+
+ return err;
+@@ -1007,6 +1011,12 @@
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
++
++ if (fbcon_decor_active(info, vc)) {
++ cols = vc->vc_decor.twidth / vc->vc_font.width;
++ rows = vc->vc_decor.theight / vc->vc_font.height;
++ }
++
+ vc_resize(vc, cols, rows);
+
+ DPRINTK("mode: %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@
+ cap = info->flags;
+
+ if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+- (info->fix.type == FB_TYPE_TEXT))
++ (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ logo = 0;
+
+ if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@
+ fbcon_clear_margins(vc, 0);
+ }
+
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear(vc, info, sy, sx, height, width);
++ return;
++ }
++
+ /* Split blits that cross physical y_wrap boundary */
+
+ y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@
+ struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+- get_color(vc, info, scr_readw(s), 1),
+- get_color(vc, info, scr_readw(s), 0));
++ if (!fbcon_is_inactive(vc, info)) {
++
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++ else
++ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++ get_color(vc, info, scr_readw(s), 1),
++ get_color(vc, info, scr_readw(s), 0));
++ }
+ }
+
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1298,8 +1318,13 @@
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->clear_margins(vc, info, bottom_only);
++ if (!fbcon_is_inactive(vc, info)) {
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear_margins(vc, info, bottom_only);
++ } else {
++ ops->clear_margins(vc, info, bottom_only);
++ }
++ }
+ }
+
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1844,7 @@
+ count = vc->vc_rows;
+ if (softback_top)
+ fbcon_softback_note(vc, t, count);
+- if (logo_shown >= 0)
++ if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ goto redraw_up;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+@@ -1912,6 +1937,8 @@
+ count = vc->vc_rows;
+ if (logo_shown >= 0)
+ goto redraw_down;
++ if (fbcon_decor_active(info, vc))
++ goto redraw_down;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2087,13 @@
+ }
+ return;
+ }
++
++ if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ /* must use slower redraw bmove to keep background pic intact */
++ fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ return;
++ }
++
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+ }
+@@ -2130,8 +2164,8 @@
+ var.yres = virt_h * virt_fh;
+ x_diff = info->var.xres - var.xres;
+ y_diff = info->var.yres - var.yres;
+- if (x_diff < 0 || x_diff > virt_fw ||
+- y_diff < 0 || y_diff > virt_fh) {
++ if ((x_diff < 0 || x_diff > virt_fw ||
++ y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ const struct fb_videomode *mode;
+
+ DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2201,21 @@
+
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
++ prev_console = ops->currcon;
++ if (prev_console != -1)
++ old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++ if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++ if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++ /* Clear the screen to avoid displaying funky colors during
++ * palette updates. */
++ memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++ 0, info->var.yres * info->fix.line_length);
++ }
++ }
++#endif
+
+ if (softback_top) {
+ if (softback_lines)
+@@ -2185,9 +2234,6 @@
+ logo_shown = FBCON_LOGO_CANSHOW;
+ }
+
+- prev_console = ops->currcon;
+- if (prev_console != -1)
+- old_info = registered_fb[con2fb_map[prev_console]];
+ /*
+ * FIXME: If we have multiple fbdev's loaded, we need to
+ * update all info->currcon. Perhaps, we can place this
+@@ -2231,6 +2277,18 @@
+ fbcon_del_cursor_timer(old_info);
+ }
+
++ if (fbcon_decor_active_vc(vc)) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++ if (!vc_curr->vc_decor.theme ||
++ strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++ (fbcon_decor_active_nores(info, vc_curr) &&
++ !fbcon_decor_active(info, vc_curr))) {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++ }
++
+ if (fbcon_is_inactive(vc, info) ||
+ ops->blank_state != FB_BLANK_UNBLANK)
+ fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@
+ }
+ }
+
+- if (!fbcon_is_inactive(vc, info)) {
++ if (!fbcon_is_inactive(vc, info)) {
+ if (ops->blank_state != blank) {
+ ops->blank_state = blank;
+ fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ ops->cursor_flash = (!blank);
+
+- if (!(info->flags & FBINFO_MISC_USEREVENT))
+- if (fb_blank(info, blank))
+- fbcon_generic_blank(vc, info, blank);
++ if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++ if (fb_blank(info, blank)) {
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_blank(vc, info, blank);
++ else
++ fbcon_generic_blank(vc, info, blank);
++ }
++ }
+ }
+
+ if (!blank)
+@@ -2522,13 +2585,22 @@
+ }
+
+ if (resize) {
++ /* reset wrap/pan */
+ int cols, rows;
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++ if (fbcon_decor_active(info, vc)) {
++ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++ cols = vc->vc_decor.twidth;
++ rows = vc->vc_decor.theight;
++ }
+ cols /= w;
+ rows /= h;
++
+ vc_resize(vc, cols, rows);
++
+ if (CON_IS_VISIBLE(vc) && softback_buf)
+ fbcon_update_softback(vc);
+ } else if (CON_IS_VISIBLE(vc)
+@@ -2657,7 +2729,11 @@
+ int i, j, k, depth;
+ u8 val;
+
+- if (fbcon_is_inactive(vc, info))
++ if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++ || vc->vc_num != fg_console
++#endif
++ )
+ return -EINVAL;
+
+ if (!CON_IS_VISIBLE(vc))
+@@ -2683,14 +2759,56 @@
+ } else
+ fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+
+- return fb_set_cmap(&palette_cmap, info);
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++ u16 *red, *green, *blue;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ int h;
++
++ struct fb_cmap cmap = {
++ .start = 0,
++ .len = (1 << minlen),
++ .red = NULL,
++ .green = NULL,
++ .blue = NULL,
++ .transp = NULL
++ };
++
++ red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++ if (!red)
++ goto out;
++
++ green = red + 256;
++ blue = green + 256;
++ cmap.red = red;
++ cmap.green = green;
++ cmap.blue = blue;
++
++ for (i = 0; i < cmap.len; i++) {
++ red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++ }
++
++ h = fb_set_cmap(&cmap, info);
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ kfree(red);
++
++ return h;
++
++ } else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++ fb_set_cmap(&info->bgdecor.cmap, info);
++
++out: return fb_set_cmap(&palette_cmap, info);
+ }
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ unsigned long p;
+ int line;
+-
++
+ if (vc->vc_num != fg_console || !softback_lines)
+ return (u16 *) (vc->vc_origin + offset);
+ line = offset / vc->vc_size_row;
+@@ -2909,7 +3027,14 @@
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ } else {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++
+ updatescrollmode(p, info, vc);
+ scrollback_max = 0;
+ scrollback_current = 0;
+@@ -2954,7 +3079,9 @@
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ }
+ }
+
+ if (fg != -1)
+@@ -3596,6 +3723,7 @@
+ }
+ }
+
++ fbcon_decor_exit();
+ fbcon_has_exited = 1;
+ }
+
+diff -Nur linux-3.15.1.orig/drivers/video/console/fbcondecor.c linux-3.15.1/drivers/video/console/fbcondecor.c
+--- linux-3.15.1.orig/drivers/video/console/fbcondecor.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15.1/drivers/video/console/fbcondecor.c 2014-06-28 11:21:19.000000000 +0200
+@@ -0,0 +1,555 @@
++/*
++ * linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ * Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ * Code based upon "Bootsplash" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++ char *envp[] = {
++ "HOME=/",
++ "PATH=/sbin:/bin",
++ NULL
++ };
++
++ char tfb[5];
++ char tcons[5];
++ unsigned char fb = (int) con2fb_map[vc];
++
++ char *argv[] = {
++ fbcon_decor_path,
++ "2",
++ cmd,
++ tcons,
++ tfb,
++ vc_cons[vc].d->vc_decor.theme,
++ NULL
++ };
++
++ snprintf(tfb,5,"%d",fb);
++ snprintf(tcons,5,"%d",vc);
++
++ return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++ struct fb_info* info;
++
++ if (!vc->vc_decor.state)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ vc->vc_decor.state = 0;
++ vc_resize(vc, info->var.xres / vc->vc_font.width,
++ info->var.yres / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num && redraw) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++ struct fb_info* info;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++ info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++ vc->vc_num == fg_console))
++ return -EINVAL;
++
++ vc->vc_decor.state = 1;
++ vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++ vc->vc_decor.theight / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++ int ret;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++ if (!state)
++ ret = fbcon_decor_disable(vc, 1);
++ else
++ ret = fbcon_decor_enable(vc);
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++ *state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ char *tmp;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL || !cfg->twidth || !cfg->theight ||
++ cfg->tx + cfg->twidth > info->var.xres ||
++ cfg->ty + cfg->theight > info->var.yres)
++ return -EINVAL;
++
++ len = strlen_user(cfg->theme);
++ if (!len || len > FBCON_DECOR_THEME_LEN)
++ return -EINVAL;
++ tmp = kmalloc(len, GFP_KERNEL);
++ if (!tmp)
++ return -ENOMEM;
++ if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++ return -EFAULT;
++ cfg->theme = tmp;
++ cfg->state = 0;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held; we also don't need to disable decor because either the
++ * new config and background picture will be successfully loaded, and the
++ * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER) {
++ console_lock();
++ if (vc->vc_decor.state)
++ fbcon_decor_disable(vc, 1);
++// }
++
++ if (vc->vc_decor.theme)
++ kfree(vc->vc_decor.theme);
++
++ vc->vc_decor = *cfg;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++ vc->vc_num, vc->vc_decor.theme);
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++ char __user *tmp;
++
++ tmp = decor->theme;
++ *decor = vc->vc_decor;
++ decor->theme = tmp;
++
++ if (vc->vc_decor.theme) {
++ if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++ return -EFAULT;
++ } else
++ if (put_user(0, tmp))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ u8 *tmp;
++
++ if (vc->vc_num != fg_console)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ if (img->width != info->var.xres || img->height != info->var.yres) {
++ printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++ printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++ return -EINVAL;
++ }
++
++ if (img->depth != info->var.bits_per_pixel) {
++ printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++ return -EINVAL;
++ }
++
++ if (img->depth == 8) {
++ if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++ !img->cmap.blue)
++ return -EINVAL;
++
++ tmp = vmalloc(img->cmap.len * 3 * 2);
++ if (!tmp)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp,
++ (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 1),
++ (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 2),
++ (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++ vfree(tmp);
++ return -EFAULT;
++ }
++
++ img->cmap.transp = NULL;
++ img->cmap.red = (u16*)tmp;
++ img->cmap.green = img->cmap.red + img->cmap.len;
++ img->cmap.blue = img->cmap.green + img->cmap.len;
++ } else {
++ img->cmap.red = NULL;
++ }
++
++ len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++ /*
++ * Allocate an additional byte so that we never go outside of the
++ * buffer boundaries in the rendering functions in a 24 bpp mode.
++ */
++ tmp = vmalloc(len + 1);
++
++ if (!tmp)
++ goto out;
++
++ if (copy_from_user(tmp, (void __user*)img->data, len))
++ goto out;
++
++ img->data = tmp;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++
++ if (info->bgdecor.data)
++ vfree((u8*)info->bgdecor.data);
++ if (info->bgdecor.cmap.red)
++ vfree(info->bgdecor.cmap.red);
++
++ info->bgdecor = *img;
++
++ if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return 0;
++
++out: if (img->cmap.red)
++ vfree(img->cmap.red);
++
++ if (tmp)
++ vfree(tmp);
++ return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++ struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data, &wrapper->data);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC:
++ {
++ struct fb_image img;
++ if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++ case FBIOCONDECOR_SETCFG:
++ {
++ struct vc_decor cfg;
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++ case FBIOCONDECOR_GETCFG:
++ {
++ int rval;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++ return -EFAULT;
++ return rval;
++ }
++ case FBIOCONDECOR_SETSTATE:
++ {
++ unsigned int state = 0;
++ if (get_user(state, (unsigned int __user *)data))
++ return -EFAULT;
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++ case FBIOCONDECOR_GETSTATE:
++ {
++ unsigned int state = 0;
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ return put_user(state, (unsigned int __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++ struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ compat_uptr_t data_compat = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper32)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data_compat, &wrapper->data);
++ data = compat_ptr(data_compat);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC32:
++ {
++ struct fb_image32 img_compat;
++ struct fb_image img;
++
++ if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++ return -EFAULT;
++
++ fb_image_from_compat(img, img_compat);
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++
++ case FBIOCONDECOR_SETCFG32:
++ {
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++
++ vc_decor_from_compat(cfg, cfg_compat);
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++
++ case FBIOCONDECOR_GETCFG32:
++ {
++ int rval;
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ cfg.theme = compat_ptr(cfg_compat.theme);
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ vc_decor_to_compat(cfg_compat, cfg);
++
++ if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ return rval;
++ }
++
++ case FBIOCONDECOR_SETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ if (get_user(state_compat, (compat_uint_t __user *)data))
++ return -EFAULT;
++
++ state = (unsigned int)state_compat;
++
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++
++ case FBIOCONDECOR_GETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ state_compat = (compat_uint_t)state;
++
++ return put_user(state_compat, (compat_uint_t __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++#else
++ #define fbcon_decor_compat_ioctl NULL
++#endif
++
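/*
 * Editorial sketch, not part of the patch: a compact equivalent of the
 * marshalling pattern the compat handler above applies to every
 * FBIOCONDECOR_*32 command with access_ok()/__get_user().  A 32-bit
 * process hands over a wrapper whose pointer member is a compat_uptr_t;
 * the 64-bit kernel widens it with compat_ptr() before dereferencing.
 * The demo_* name is hypothetical.
 */
#include <linux/compat.h>
#include <linux/uaccess.h>

static long demo_compat_fetch(unsigned long arg, unsigned short *vc,
			      void __user **data)
{
	struct fbcon_decor_iowrapper32 w;

	if (copy_from_user(&w, compat_ptr(arg), sizeof(w)))
		return -EFAULT;
	*vc = w.vc;                 /* plain members copy across unchanged */
	*data = compat_ptr(w.data); /* 32-bit user pointer -> native */
	return 0;
}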
++static const struct file_operations fbcon_decor_ops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = fbcon_decor_ioctl,
++ .compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "fbcondecor",
++ .fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset(void)
++{
++ int i;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ registered_fb[i]->bgdecor.data = NULL;
++ registered_fb[i]->bgdecor.cmap.red = NULL;
++ }
++
++ for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++ vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++ vc_cons[i].d->vc_decor.theight = 0;
++ vc_cons[i].d->vc_decor.theme = NULL;
++ }
++
++ return;
++}
++
++int fbcon_decor_init(void)
++{
++ int i;
++
++ fbcon_decor_reset();
++
++ if (initialized)
++ return 0;
++
++ i = misc_register(&fbcon_decor_dev);
++ if (i) {
++ printk(KERN_ERR "fbcondecor: failed to register device\n");
++ return i;
++ }
++
++ fbcon_decor_call_helper("init", 0);
++ initialized = 1;
++ return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++ fbcon_decor_reset();
++ return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
+diff -Nur linux-3.15.1.orig/drivers/video/console/fbcondecor.h linux-3.15.1/drivers/video/console/fbcondecor.h
+--- linux-3.15.1.orig/drivers/video/console/fbcondecor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15.1/drivers/video/console/fbcondecor.h 2014-06-28 11:21:19.000000000 +0200
+@@ -0,0 +1,79 @@
++/*
++ * linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++void fbcon_decor_reset(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char *cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) && \
++ x->bgdecor.width == x->var.xres && \
++ x->bgdecor.height == x->var.yres && \
++ x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char *cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff -Nur linux-3.15.1.orig/drivers/video/fbdev/core/fbcmap.c linux-3.15.1/drivers/video/fbdev/core/fbcmap.c
+--- linux-3.15.1.orig/drivers/video/fbdev/core/fbcmap.c 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/drivers/video/fbdev/core/fbcmap.c 2014-06-28 11:21:49.000000000 +0200
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+ 0x0000, 0xaaaa
+ };
+@@ -257,6 +259,10 @@
+ if (rc == 0)
+ fb_copy_cmap(cmap, &info->cmap);
+
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++
+ return rc;
+ }
+
+diff -Nur linux-3.15.1.orig/include/linux/console_decor.h linux-3.15.1/include/linux/console_decor.h
+--- linux-3.15.1.orig/include/linux/console_decor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15.1/include/linux/console_decor.h 2014-06-28 11:21:19.000000000 +0200
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff -Nur linux-3.15.1.orig/include/linux/console_struct.h linux-3.15.1/include/linux/console_struct.h
+--- linux-3.15.1.orig/include/linux/console_struct.h 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/include/linux/console_struct.h 2014-06-28 11:21:19.000000000 +0200
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+
+ #define NPAR 16
++#include <linux/console_decor.h>
+
+ struct vc_data {
+ struct tty_port port; /* Upper level data */
+@@ -107,6 +108,8 @@
+ unsigned long vc_uni_pagedir;
+ unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++ struct vc_decor vc_decor;
+ /* additional information is in vt_kern.h */
+ };
+
+diff -Nur linux-3.15.1.orig/include/linux/fb.h linux-3.15.1/include/linux/fb.h
+--- linux-3.15.1.orig/include/linux/fb.h 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/include/linux/fb.h 2014-06-28 11:21:19.000000000 +0200
+@@ -219,6 +219,34 @@
+ };
+ #endif
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++ __u32 dx; /* Where to place image */
++ __u32 dy;
++ __u32 width; /* Size of image */
++ __u32 height;
++ __u32 fg_color; /* Only used when a mono bitmap */
++ __u32 bg_color;
++ __u8 depth; /* Depth of the image */
++ const compat_uptr_t data; /* Pointer to image data */
++ struct fb_cmap32 cmap; /* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++ (to).dx = (from).dx; \
++ (to).dy = (from).dy; \
++ (to).width = (from).width; \
++ (to).height = (from).height; \
++ (to).fg_color = (from).fg_color; \
++ (to).bg_color = (from).bg_color; \
++ (to).depth = (from).depth; \
++ (to).data = compat_ptr((from).data); \
++ fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+ * Frame buffer operations
+ *
+@@ -489,6 +517,9 @@
+ #define FBINFO_STATE_SUSPENDED 1
+ u32 state; /* Hardware state i.e suspend */
+ void *fbcon_par; /* fbcon use-only private area */
++
++ struct fb_image bgdecor;
++
+ /* From here on everything is device dependent */
+ void *par;
+ /* we need the PCI or similar aperture base/size not
+diff -Nur linux-3.15.1.orig/include/uapi/linux/fb.h linux-3.15.1/include/uapi/linux/fb.h
+--- linux-3.15.1.orig/include/uapi/linux/fb.h 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/include/uapi/linux/fb.h 2014-06-28 11:21:19.000000000 +0200
+@@ -8,6 +8,25 @@
+
+ #define FB_MAX 32 /* sufficient for now */
+
++struct fbcon_decor_iowrapper
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+ 0x46 is 'F' */
+ #define FBIOGET_VSCREENINFO 0x4600
+@@ -35,6 +54,25 @@
+ #define FBIOGET_DISPINFO 0x4618
+ #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
+
++#define FBIOCONDECOR_SETCFG _IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG _IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE _IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC _IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32 _IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32 _IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32 _IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32 _IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32 _IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN 128 /* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL 0 /* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER 1 /* User ioctl origin */
++
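/*
 * Editorial usage sketch (hypothetical, error handling trimmed): driving
 * FBIOCONDECOR_SETSTATE from userspace with the ioctls defined above.
 * Assumes a tree patched so <linux/fb.h> exports the wrapper struct, and
 * that the miscdevice registered earlier shows up as /dev/fbcondecor
 * (derived from its .name; the exact node depends on udev).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	unsigned int on = 1;
	struct fbcon_decor_iowrapper w = {
		.vc = 0,                           /* first virtual console */
		.origin = FBCON_DECOR_IO_ORIG_USER,
		.data = &on,                       /* 1 = enable decorations */
	};
	int fd = open("/dev/fbcondecor", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOCONDECOR_SETSTATE, &w) < 0)
		perror("fbcondecor");
	return 0;
}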
+ #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
+ #define FB_TYPE_PLANES 1 /* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */
+@@ -277,6 +315,29 @@
+ __u32 reserved[4]; /* Reserved for future compatibility */
+ };
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++ __u32 start;
++ __u32 len; /* Number of entries */
++ compat_uptr_t red; /* Red values */
++ compat_uptr_t green;
++ compat_uptr_t blue;
++ compat_uptr_t transp; /* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++ (to).start = (from).start; \
++ (to).len = (from).len; \
++ (to).red = compat_ptr((from).red); \
++ (to).green = compat_ptr((from).green); \
++ (to).blue = compat_ptr((from).blue); \
++ (to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ __u32 start; /* First entry */
+ __u32 len; /* Number of entries */
+diff -Nur linux-3.15.1.orig/kernel/sysctl.c linux-3.15.1/kernel/sysctl.c
+--- linux-3.15.1.orig/kernel/sysctl.c 2014-06-16 22:44:27.000000000 +0200
++++ linux-3.15.1/kernel/sysctl.c 2014-06-28 11:21:19.000000000 +0200
+@@ -146,6 +146,10 @@
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -255,6 +259,15 @@
+ .mode = 0555,
+ .child = dev_table,
+ },
++#ifdef CONFIG_FB_CON_DECOR
++ {
++ .procname = "fbcondecor",
++ .data = &fbcon_decor_path,
++ .maxlen = KMOD_PATH_LEN,
++ .mode = 0644,
++ .proc_handler = &proc_dostring,
++ },
++#endif
+ { }
+ };
+
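/*
 * Editorial sketch: the ctl_table entry above exposes fbcon_decor_path as
 * a string sysctl via proc_dostring.  Which procfs path it appears under
 * depends on the table it is spliced into; assuming the root table in
 * kernel/sysctl.c, userspace could read it roughly like this
 * (hypothetical path):
 */
#include <stdio.h>

int main(void)
{
	char path[128];	/* a KMOD_PATH_LEN-sized buffer would be safer */
	FILE *f = fopen("/proc/sys/fbcondecor", "r");

	if (!f)
		return 1;
	if (fgets(path, sizeof(path), f))
		printf("fbcondecor helper: %s", path);
	fclose(f);
	return 0;
}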
diff --git a/target/linux/patches/3.18.9/patch-linuxrt b/target/linux/patches/3.18.9/patch-linuxrt
new file mode 100644
index 000000000..444acdc6c
--- /dev/null
+++ b/target/linux/patches/3.18.9/patch-linuxrt
@@ -0,0 +1,23744 @@
+diff -Nur linux-3.18.8.orig/arch/alpha/mm/fault.c linux-3.18.8/arch/alpha/mm/fault.c
+--- linux-3.18.8.orig/arch/alpha/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/alpha/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -107,7 +107,7 @@
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic())
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
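/*
 * Editorial sketch of the idea behind the in_atomic() ->
 * pagefault_disabled() conversion repeated across the architecture fault
 * handlers in this patch: on PREEMPT_RT, in_atomic() no longer reliably
 * answers "may this fault sleep?", so the RT tree keys the test to an
 * explicit per-task counter instead (the s390 hunk below shows the field
 * directly).  Simplified, hypothetical demo_* definitions:
 */
#include <linux/sched.h>

static inline void demo_pagefault_disable(struct task_struct *tsk)
{
	tsk->pagefault_disabled++;
	barrier();	/* order the bump against the access that may fault */
}

static inline int demo_pagefault_disabled(struct task_struct *tsk)
{
	return tsk->pagefault_disabled != 0;
}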
+diff -Nur linux-3.18.8.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.8/arch/arm/include/asm/cmpxchg.h
+--- linux-3.18.8.orig/arch/arm/include/asm/cmpxchg.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/include/asm/cmpxchg.h 2015-03-03 08:05:17.000000000 +0100
+@@ -129,6 +129,8 @@
+
+ #else /* min ARCH >= ARMv6 */
+
++#define __HAVE_ARCH_CMPXCHG 1
++
+ extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+ /*
+diff -Nur linux-3.18.8.orig/arch/arm/include/asm/futex.h linux-3.18.8/arch/arm/include/asm/futex.h
+--- linux-3.18.8.orig/arch/arm/include/asm/futex.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/include/asm/futex.h 2015-03-03 08:05:17.000000000 +0100
+@@ -93,6 +93,8 @@
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ preempt_disable_rt();
++
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+@@ -104,6 +106,8 @@
+ : "cc", "memory");
+
+ *uval = val;
++
++ preempt_enable_rt();
+ return ret;
+ }
+
+diff -Nur linux-3.18.8.orig/arch/arm/include/asm/switch_to.h linux-3.18.8/arch/arm/include/asm/switch_to.h
+--- linux-3.18.8.orig/arch/arm/include/asm/switch_to.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/include/asm/switch_to.h 2015-03-03 08:05:17.000000000 +0100
+@@ -3,6 +3,13 @@
+
+ #include <linux/thread_info.h>
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+ /*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+@@ -22,6 +29,7 @@
+
+ #define switch_to(prev,next,last) \
+ do { \
++ switch_kmaps(prev, next); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
+ } while (0)
+
+diff -Nur linux-3.18.8.orig/arch/arm/include/asm/thread_info.h linux-3.18.8/arch/arm/include/asm/thread_info.h
+--- linux-3.18.8.orig/arch/arm/include/asm/thread_info.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/include/asm/thread_info.h 2015-03-03 08:05:17.000000000 +0100
+@@ -51,6 +51,7 @@
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+@@ -149,6 +150,7 @@
+ #define TIF_SIGPENDING 0
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
++#define TIF_NEED_RESCHED_LAZY 3
+ #define TIF_UPROBE 7
+ #define TIF_SYSCALL_TRACE 8
+ #define TIF_SYSCALL_AUDIT 9
+@@ -162,6 +164,7 @@
+ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
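/*
 * Editorial sketch (simplified, demo_* hypothetical): how the two
 * reschedule bits added above interact.  TIF_NEED_RESCHED preempts as
 * soon as preempt_count is zero; TIF_NEED_RESCHED_LAZY additionally
 * waits for preempt_lazy_count to drain, mirroring the assembly in
 * entry-armv.S below.
 */
static inline int demo_should_preempt(struct thread_info *ti)
{
	if (ti->preempt_count)
		return 0;
	if (ti->flags & _TIF_NEED_RESCHED)
		return 1;
	return !ti->preempt_lazy_count &&
	       (ti->flags & _TIF_NEED_RESCHED_LAZY);
}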
+diff -Nur linux-3.18.8.orig/arch/arm/Kconfig linux-3.18.8/arch/arm/Kconfig
+--- linux-3.18.8.orig/arch/arm/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/Kconfig 2015-03-03 08:05:17.000000000 +0100
+@@ -62,6 +62,7 @@
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
+diff -Nur linux-3.18.8.orig/arch/arm/kernel/asm-offsets.c linux-3.18.8/arch/arm/kernel/asm-offsets.c
+--- linux-3.18.8.orig/arch/arm/kernel/asm-offsets.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/kernel/asm-offsets.c 2015-03-03 08:05:17.000000000 +0100
+@@ -64,6 +64,7 @@
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+diff -Nur linux-3.18.8.orig/arch/arm/kernel/entry-armv.S linux-3.18.8/arch/arm/kernel/entry-armv.S
+--- linux-3.18.8.orig/arch/arm/kernel/entry-armv.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/kernel/entry-armv.S 2015-03-03 08:05:17.000000000 +0100
+@@ -207,11 +207,18 @@
+ #ifdef CONFIG_PREEMPT
+ get_thread_info tsk
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exception
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -226,6 +233,8 @@
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+ b 1b
+ #endif
+diff -Nur linux-3.18.8.orig/arch/arm/kernel/process.c linux-3.18.8/arch/arm/kernel/process.c
+--- linux-3.18.8.orig/arch/arm/kernel/process.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/kernel/process.c 2015-03-03 08:05:17.000000000 +0100
+@@ -431,6 +431,30 @@
+ }
+
+ #ifdef CONFIG_MMU
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+ * The vectors page is always readable from user space for the
+diff -Nur linux-3.18.8.orig/arch/arm/kernel/signal.c linux-3.18.8/arch/arm/kernel/signal.c
+--- linux-3.18.8.orig/arch/arm/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/kernel/signal.c 2015-03-03 08:05:17.000000000 +0100
+@@ -574,7 +574,8 @@
+ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ {
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+diff -Nur linux-3.18.8.orig/arch/arm/kernel/smp.c linux-3.18.8/arch/arm/kernel/smp.c
+--- linux-3.18.8.orig/arch/arm/kernel/smp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/kernel/smp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -506,12 +506,14 @@
+ }
+
+ #ifdef CONFIG_IRQ_WORK
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ if (arch_irq_work_has_interrupt())
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+ }
+ #endif
++#endif
+
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ void tick_broadcast(const struct cpumask *mask)
+diff -Nur linux-3.18.8.orig/arch/arm/kernel/unwind.c linux-3.18.8/arch/arm/kernel/unwind.c
+--- linux-3.18.8.orig/arch/arm/kernel/unwind.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/kernel/unwind.c 2015-03-03 08:05:17.000000000 +0100
+@@ -93,7 +93,7 @@
+ static const struct unwind_idx *__origin_unwind_idx;
+ extern const struct unwind_idx __stop_unwind_idx[];
+
+-static DEFINE_SPINLOCK(unwind_lock);
++static DEFINE_RAW_SPINLOCK(unwind_lock);
+ static LIST_HEAD(unwind_tables);
+
+ /* Convert a prel31 symbol to an absolute address */
+@@ -201,7 +201,7 @@
+ /* module unwind tables */
+ struct unwind_table *table;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->begin_addr &&
+ addr < table->end_addr) {
+@@ -213,7 +213,7 @@
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+ }
+
+ pr_debug("%s: idx = %p\n", __func__, idx);
+@@ -530,9 +530,9 @@
+ tab->begin_addr = text_addr;
+ tab->end_addr = text_addr + text_size;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&tab->list, &unwind_tables);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return tab;
+ }
+@@ -544,9 +544,9 @@
+ if (!tab)
+ return;
+
+- spin_lock_irqsave(&unwind_lock, flags);
++ raw_spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&tab->list);
+- spin_unlock_irqrestore(&unwind_lock, flags);
++ raw_spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(tab);
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.8/arch/arm/mach-at91/at91rm9200_time.c
+--- linux-3.18.8.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-at91/at91rm9200_time.c 2015-03-03 08:05:17.000000000 +0100
+@@ -135,6 +135,7 @@
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
++ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
+ case CLOCK_EVT_MODE_RESUME:
+ irqmask = 0;
+ break;
+diff -Nur linux-3.18.8.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.8/arch/arm/mach-exynos/platsmp.c
+--- linux-3.18.8.orig/arch/arm/mach-exynos/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-exynos/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -137,7 +137,7 @@
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void exynos_secondary_init(unsigned int cpu)
+ {
+@@ -150,8 +150,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -165,7 +165,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -192,7 +192,7 @@
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -242,7 +242,7 @@
+ * calibrations, then wait for it to finish
+ */
+ fail:
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? ret : 0;
+ }
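/*
 * Editorial sketch: the spinlock_t -> raw_spinlock_t conversion applied
 * to every platform boot_lock in the hunks around here.  On PREEMPT_RT a
 * plain spinlock becomes a sleeping rtmutex, which a just-woken secondary
 * CPU cannot take; a raw spinlock keeps the original spinning semantics.
 * Minimal illustration, not tied to any one machine:
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_boot_lock);

static void demo_secondary_init(void)
{
	/* wait for the boot CPU to drop the lock, then release at once */
	raw_spin_lock(&demo_boot_lock);
	raw_spin_unlock(&demo_boot_lock);
}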
+diff -Nur linux-3.18.8.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.8/arch/arm/mach-hisi/platmcpm.c
+--- linux-3.18.8.orig/arch/arm/mach-hisi/platmcpm.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-hisi/platmcpm.c 2015-03-03 08:05:17.000000000 +0100
+@@ -57,7 +57,7 @@
+
+ static void __iomem *sysctrl, *fabric;
+ static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+ static u32 fabric_phys_addr;
+ /*
+ * [0]: bootwrapper physical address
+@@ -104,7 +104,7 @@
+ if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
+ return -EINVAL;
+
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+
+ if (hip04_cpu_table[cluster][cpu])
+ goto out;
+@@ -133,7 +133,7 @@
+ udelay(20);
+ out:
+ hip04_cpu_table[cluster][cpu]++;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+
+ return 0;
+ }
+@@ -149,7 +149,7 @@
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ hip04_cpu_table[cluster][cpu]--;
+ if (hip04_cpu_table[cluster][cpu] == 1) {
+@@ -162,7 +162,7 @@
+
+ last_man = hip04_cluster_is_down(cluster);
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3 \n\t"
+@@ -173,7 +173,7 @@
+ hip04_set_snoop_filter(cluster, 0);
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ v7_exit_coherency_flush(louis);
+ }
+
+@@ -192,7 +192,7 @@
+ cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
+
+ count = TIMEOUT_MSEC / POLL_MSEC;
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ for (tries = 0; tries < count; tries++) {
+ if (hip04_cpu_table[cluster][cpu]) {
+ ret = -EBUSY;
+@@ -202,10 +202,10 @@
+ data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
+ if (data & CORE_WFI_STATUS(cpu))
+ break;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ /* Wait for clean L2 when the whole cluster is down. */
+ msleep(POLL_MSEC);
+- spin_lock_irq(&boot_lock);
++ raw_spin_lock_irq(&boot_lock);
+ }
+ if (tries >= count)
+ goto err;
+@@ -220,10 +220,10 @@
+ }
+ if (tries >= count)
+ goto err;
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return 0;
+ err:
+- spin_unlock_irq(&boot_lock);
++ raw_spin_unlock_irq(&boot_lock);
+ return ret;
+ }
+
+@@ -235,10 +235,10 @@
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+ if (!hip04_cpu_table[cluster][cpu])
+ hip04_cpu_table[cluster][cpu] = 1;
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
+diff -Nur linux-3.18.8.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.8/arch/arm/mach-omap2/omap-smp.c
+--- linux-3.18.8.orig/arch/arm/mach-omap2/omap-smp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-omap2/omap-smp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -43,7 +43,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -74,8 +74,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -89,7 +89,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -166,7 +166,7 @@
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.8/arch/arm/mach-prima2/platsmp.c
+--- linux-3.18.8.orig/arch/arm/mach-prima2/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-prima2/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -23,7 +23,7 @@
+ static void __iomem *scu_base;
+ static void __iomem *rsc_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static struct map_desc scu_io_desc __initdata = {
+ .length = SZ_4K,
+@@ -56,8 +56,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static struct of_device_id rsc_ids[] = {
+@@ -95,7 +95,7 @@
+ /* make sure write buffer is drained */
+ mb();
+
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -127,7 +127,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.8/arch/arm/mach-qcom/platsmp.c
+--- linux-3.18.8.orig/arch/arm/mach-qcom/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-qcom/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -46,7 +46,7 @@
+
+ extern void secondary_startup(void);
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void __ref qcom_cpu_die(unsigned int cpu)
+@@ -60,8 +60,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int scss_release_secondary(unsigned int cpu)
+@@ -284,7 +284,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Send the secondary CPU a soft interrupt, thereby causing
+@@ -297,7 +297,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return ret;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mach-spear/platsmp.c linux-3.18.8/arch/arm/mach-spear/platsmp.c
+--- linux-3.18.8.orig/arch/arm/mach-spear/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-spear/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -32,7 +32,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+
+@@ -47,8 +47,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -59,7 +59,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -84,7 +84,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mach-sti/platsmp.c linux-3.18.8/arch/arm/mach-sti/platsmp.c
+--- linux-3.18.8.orig/arch/arm/mach-sti/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-sti/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -34,7 +34,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void sti_secondary_init(unsigned int cpu)
+ {
+@@ -49,8 +49,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -61,7 +61,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -92,7 +92,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.8/arch/arm/mach-ux500/platsmp.c
+--- linux-3.18.8.orig/arch/arm/mach-ux500/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mach-ux500/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -51,7 +51,7 @@
+ return NULL;
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static void ux500_secondary_init(unsigned int cpu)
+ {
+@@ -64,8 +64,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -76,7 +76,7 @@
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -97,7 +97,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mm/fault.c linux-3.18.8/arch/arm/mm/fault.c
+--- linux-3.18.8.orig/arch/arm/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -277,7 +277,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(regs))
+@@ -431,6 +431,9 @@
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -498,6 +501,9 @@
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm/mm/highmem.c linux-3.18.8/arch/arm/mm/highmem.c
+--- linux-3.18.8.orig/arch/arm/mm/highmem.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/mm/highmem.c 2015-03-03 08:05:17.000000000 +0100
+@@ -53,6 +53,7 @@
+
+ void *kmap_atomic(struct page *page)
+ {
++ pte_t pte = mk_pte(page, kmap_prot);
+ unsigned int idx;
+ unsigned long vaddr;
+ void *kmap;
+@@ -91,7 +92,10 @@
+ * in place, so the contained TLB flush ensures the TLB is updated
+ * with the new mapping.
+ */
+- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -108,12 +112,15 @@
+
+ if (cache_is_vivt())
+ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(idx));
+- set_fixmap_pte(idx, __pte(0));
+ #else
+ (void) idx; /* to kill a warning */
+ #endif
++ set_fixmap_pte(idx, __pte(0));
+ kmap_atomic_idx_pop();
+ } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+ /* this address was obtained through kmap_high_get() */
+@@ -125,6 +132,7 @@
+
+ void *kmap_atomic_pfn(unsigned long pfn)
+ {
++ pte_t pte = pfn_pte(pfn, kmap_prot);
+ unsigned long vaddr;
+ int idx, type;
+ struct page *page = pfn_to_page(pfn);
+@@ -139,7 +147,10 @@
+ #ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+ #endif
+- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_fixmap_pte(idx, pte);
+
+ return (void *)vaddr;
+ }
+@@ -153,3 +164,28 @@
+
+ return pte_page(get_fixmap_pte(vaddr));
+ }
++
++#if defined CONFIG_PREEMPT_RT_FULL
++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_fixmap_pte(idx, __pte(0));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_fixmap_pte(idx, next_p->kmap_pte[i]);
++ }
++}
++#endif
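/*
 * Editorial usage sketch: callers of kmap_atomic() need no change under
 * the RT rework above; the saved ptes only let switch_kmaps() replay a
 * preempted task's fixmap slots on the CPU it resumes on.  demo_* is
 * hypothetical.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void demo_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);

	memcpy(dst, src, len);
	kunmap_atomic(src);
}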
+diff -Nur linux-3.18.8.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.8/arch/arm/plat-versatile/platsmp.c
+--- linux-3.18.8.orig/arch/arm/plat-versatile/platsmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm/plat-versatile/platsmp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -30,7 +30,7 @@
+ sync_cache_w(&pen_release);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void versatile_secondary_init(unsigned int cpu)
+ {
+@@ -43,8 +43,8 @@
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -55,7 +55,7 @@
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -85,7 +85,7 @@
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+diff -Nur linux-3.18.8.orig/arch/arm64/kernel/smp.c linux-3.18.8/arch/arm64/kernel/smp.c
+--- linux-3.18.8.orig/arch/arm64/kernel/smp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/arm64/kernel/smp.c 2015-03-03 08:05:17.000000000 +0100
+@@ -529,12 +529,14 @@
+ }
+
+ #ifdef CONFIG_IRQ_WORK
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ if (__smp_cross_call)
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+ }
+ #endif
++#endif
+
+ static DEFINE_RAW_SPINLOCK(stop_lock);
+
+diff -Nur linux-3.18.8.orig/arch/avr32/mm/fault.c linux-3.18.8/arch/avr32/mm/fault.c
+--- linux-3.18.8.orig/arch/avr32/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/avr32/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -81,7 +81,7 @@
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ goto no_context;
+
+ local_irq_enable();
+diff -Nur linux-3.18.8.orig/arch/cris/mm/fault.c linux-3.18.8/arch/cris/mm/fault.c
+--- linux-3.18.8.orig/arch/cris/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/cris/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -113,7 +113,7 @@
+ * user context, we must not take the fault.
+ */
+
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(regs))
+diff -Nur linux-3.18.8.orig/arch/frv/mm/fault.c linux-3.18.8/arch/frv/mm/fault.c
+--- linux-3.18.8.orig/arch/frv/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/frv/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -78,7 +78,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(__frame))
+diff -Nur linux-3.18.8.orig/arch/ia64/mm/fault.c linux-3.18.8/arch/ia64/mm/fault.c
+--- linux-3.18.8.orig/arch/ia64/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/ia64/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -96,7 +96,7 @@
+ /*
+ * If we're in an interrupt or have no user context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+diff -Nur linux-3.18.8.orig/arch/Kconfig linux-3.18.8/arch/Kconfig
+--- linux-3.18.8.orig/arch/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/Kconfig 2015-03-03 08:05:17.000000000 +0100
+@@ -6,6 +6,7 @@
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+diff -Nur linux-3.18.8.orig/arch/m32r/mm/fault.c linux-3.18.8/arch/m32r/mm/fault.c
+--- linux-3.18.8.orig/arch/m32r/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/m32r/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -114,7 +114,7 @@
+ * If we're in an interrupt or have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ if (error_code & ACE_USERMODE)
+diff -Nur linux-3.18.8.orig/arch/m68k/mm/fault.c linux-3.18.8/arch/m68k/mm/fault.c
+--- linux-3.18.8.orig/arch/m68k/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/m68k/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -81,7 +81,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if (user_mode(regs))
+diff -Nur linux-3.18.8.orig/arch/microblaze/mm/fault.c linux-3.18.8/arch/microblaze/mm/fault.c
+--- linux-3.18.8.orig/arch/microblaze/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/microblaze/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -107,7 +107,7 @@
+ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ is_write = 0;
+
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
+
+diff -Nur linux-3.18.8.orig/arch/mips/Kconfig linux-3.18.8/arch/mips/Kconfig
+--- linux-3.18.8.orig/arch/mips/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/mips/Kconfig 2015-03-03 08:05:17.000000000 +0100
+@@ -2196,7 +2196,7 @@
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+diff -Nur linux-3.18.8.orig/arch/mips/kernel/signal.c linux-3.18.8/arch/mips/kernel/signal.c
+--- linux-3.18.8.orig/arch/mips/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/mips/kernel/signal.c 2015-03-03 08:05:17.000000000 +0100
+@@ -613,6 +613,7 @@
+ __u32 thread_info_flags)
+ {
+ local_irq_enable();
++ preempt_check_resched();
+
+ user_exit();
+
+diff -Nur linux-3.18.8.orig/arch/mips/mm/fault.c linux-3.18.8/arch/mips/mm/fault.c
+--- linux-3.18.8.orig/arch/mips/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/mips/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -89,7 +89,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ if (user_mode(regs))
+diff -Nur linux-3.18.8.orig/arch/mips/mm/init.c linux-3.18.8/arch/mips/mm/init.c
+--- linux-3.18.8.orig/arch/mips/mm/init.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/mips/mm/init.c 2015-03-03 08:05:17.000000000 +0100
+@@ -90,7 +90,7 @@
+
+ BUG_ON(Page_dcache_dirty(page));
+
+- pagefault_disable();
++ raw_pagefault_disable();
+ idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+ idx += in_interrupt() ? FIX_N_COLOURS : 0;
+ vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+@@ -146,7 +146,7 @@
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ local_irq_restore(flags);
+- pagefault_enable();
++ raw_pagefault_enable();
+ }
+
+ void copy_user_highpage(struct page *to, struct page *from,
+diff -Nur linux-3.18.8.orig/arch/mn10300/mm/fault.c linux-3.18.8/arch/mn10300/mm/fault.c
+--- linux-3.18.8.orig/arch/mn10300/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/mn10300/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -168,7 +168,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+diff -Nur linux-3.18.8.orig/arch/parisc/mm/fault.c linux-3.18.8/arch/parisc/mm/fault.c
+--- linux-3.18.8.orig/arch/parisc/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/parisc/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -207,7 +207,7 @@
+ int fault;
+ unsigned int flags;
+
+- if (in_atomic())
++ if (pagefault_disabled())
+ goto no_context;
+
+ tsk = current;
+diff -Nur linux-3.18.8.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.8/arch/powerpc/include/asm/thread_info.h
+--- linux-3.18.8.orig/arch/powerpc/include/asm/thread_info.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/include/asm/thread_info.h 2015-03-03 08:05:17.000000000 +0100
+@@ -43,6 +43,8 @@
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ struct restart_block restart_block;
+ unsigned long local_flags; /* private flags for thread */
+
+@@ -88,8 +90,7 @@
+ #define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+ #define TIF_SIGPENDING 1 /* signal pending */
+ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+- TIF_NEED_RESCHED */
++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */
+ #define TIF_32BIT 4 /* 32 bit binary */
+ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+@@ -107,6 +108,8 @@
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling
++ TIF_NEED_RESCHED */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -125,14 +128,16 @@
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_NOHZ)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_RESTORE_TM)
++ _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff -Nur linux-3.18.8.orig/arch/powerpc/Kconfig linux-3.18.8/arch/powerpc/Kconfig
+--- linux-3.18.8.orig/arch/powerpc/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/Kconfig 2015-03-03 08:05:17.000000000 +0100
+@@ -60,10 +60,11 @@
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
+@@ -136,6 +137,7 @@
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
++ select HAVE_PREEMPT_LAZY
+ select HAVE_MOD_ARCH_SPECIFIC
+ select MODULES_USE_ELF_RELA
+ select CLONE_BACKWARDS
+@@ -303,7 +305,7 @@
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.8/arch/powerpc/kernel/asm-offsets.c
+--- linux-3.18.8.orig/arch/powerpc/kernel/asm-offsets.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/asm-offsets.c 2015-03-03 08:05:17.000000000 +0100
+@@ -159,6 +159,7 @@
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/entry_32.S linux-3.18.8/arch/powerpc/kernel/entry_32.S
+--- linux-3.18.8.orig/arch/powerpc/kernel/entry_32.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/entry_32.S 2015-03-03 08:05:17.000000000 +0100
+@@ -890,7 +890,14 @@
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ andi. r8,r8,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r9)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore
++ lwz r0,TI_FLAGS(r9)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++1:
+ lwz r3,_MSR(r1)
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+@@ -901,11 +908,11 @@
+ */
+ bl trace_hardirqs_off
+ #endif
+-1: bl preempt_schedule_irq
++2: bl preempt_schedule_irq
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r3,TI_FLAGS(r9)
+- andi. r0,r3,_TIF_NEED_RESCHED
+- bne- 1b
++ andi. r0,r3,_TIF_NEED_RESCHED_MASK
++ bne- 2b
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+@@ -1226,7 +1233,7 @@
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1247,7 +1254,7 @@
+ MTMSRD(r10) /* disable interrupts */
+ CURRENT_THREAD_INFO(r9, r1)
+ lwz r9,TI_FLAGS(r9)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/entry_64.S linux-3.18.8/arch/powerpc/kernel/entry_64.S
+--- linux-3.18.8.orig/arch/powerpc/kernel/entry_64.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/entry_64.S 2015-03-03 08:05:17.000000000 +0100
+@@ -644,7 +644,7 @@
+ #else
+ beq restore
+ #endif
+-1: andi. r0,r4,_TIF_NEED_RESCHED
++1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+@@ -706,10 +706,18 @@
+
+ #ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ check_count
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
++check_count:
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+@@ -726,7 +734,7 @@
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+- andi. r0,r4,_TIF_NEED_RESCHED
++ andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ bne 1b
+
+ /*
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/irq.c linux-3.18.8/arch/powerpc/kernel/irq.c
+--- linux-3.18.8.orig/arch/powerpc/kernel/irq.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/irq.c 2015-03-03 08:05:17.000000000 +0100
+@@ -615,6 +615,7 @@
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curtp, *irqtp;
+@@ -632,6 +633,7 @@
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/misc_32.S linux-3.18.8/arch/powerpc/kernel/misc_32.S
+--- linux-3.18.8.orig/arch/powerpc/kernel/misc_32.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/misc_32.S 2015-03-03 08:05:17.000000000 +0100
+@@ -40,6 +40,7 @@
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -56,6 +57,7 @@
+ stw r10,THREAD+KSP_LIMIT(r2)
+ mtlr r0
+ blr
++#endif
+
+ /*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/misc_64.S linux-3.18.8/arch/powerpc/kernel/misc_64.S
+--- linux-3.18.8.orig/arch/powerpc/kernel/misc_64.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/misc_64.S 2015-03-03 08:05:17.000000000 +0100
+@@ -29,6 +29,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -39,6 +40,7 @@
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_do_irq)
+ mflr r0
+diff -Nur linux-3.18.8.orig/arch/powerpc/kernel/time.c linux-3.18.8/arch/powerpc/kernel/time.c
+--- linux-3.18.8.orig/arch/powerpc/kernel/time.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/kernel/time.c 2015-03-03 08:05:17.000000000 +0100
+@@ -424,7 +424,7 @@
+ EXPORT_SYMBOL(profile_pc);
+ #endif
+
+-#ifdef CONFIG_IRQ_WORK
++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+
+ /*
+ * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
+diff -Nur linux-3.18.8.orig/arch/powerpc/mm/fault.c linux-3.18.8/arch/powerpc/mm/fault.c
+--- linux-3.18.8.orig/arch/powerpc/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/powerpc/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -273,7 +273,7 @@
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
+- if (in_atomic() || mm == NULL) {
++ if (in_atomic() || mm == NULL || pagefault_disabled()) {
+ if (!user_mode(regs)) {
+ rc = SIGSEGV;
+ goto bail;
+diff -Nur linux-3.18.8.orig/arch/s390/mm/fault.c linux-3.18.8/arch/s390/mm/fault.c
+--- linux-3.18.8.orig/arch/s390/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/s390/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -435,7 +435,8 @@
+ * user context.
+ */
+ fault = VM_FAULT_BADCONTEXT;
+- if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(regs) || !mm ||
++ tsk->pagefault_disabled))
+ goto out;
+
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+diff -Nur linux-3.18.8.orig/arch/score/mm/fault.c linux-3.18.8/arch/score/mm/fault.c
+--- linux-3.18.8.orig/arch/score/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/score/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -73,7 +73,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ if (user_mode(regs))
+diff -Nur linux-3.18.8.orig/arch/sh/kernel/irq.c linux-3.18.8/arch/sh/kernel/irq.c
+--- linux-3.18.8.orig/arch/sh/kernel/irq.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sh/kernel/irq.c 2015-03-03 08:05:17.000000000 +0100
+@@ -149,6 +149,7 @@
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curctx;
+@@ -176,6 +177,7 @@
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff -Nur linux-3.18.8.orig/arch/sh/mm/fault.c linux-3.18.8/arch/sh/mm/fault.c
+--- linux-3.18.8.orig/arch/sh/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sh/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -440,7 +440,7 @@
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+diff -Nur linux-3.18.8.orig/arch/sparc/Kconfig linux-3.18.8/arch/sparc/Kconfig
+--- linux-3.18.8.orig/arch/sparc/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/Kconfig 2015-03-03 08:05:17.000000000 +0100
+@@ -182,12 +182,10 @@
+ source kernel/Kconfig.hz
+
+ config RWSEM_GENERIC_SPINLOCK
+- bool
+- default y if SPARC32
++ def_bool PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+- bool
+- default y if SPARC64
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_HWEIGHT
+ bool
+@@ -528,6 +526,10 @@
+
+ source "fs/Kconfig.binfmt"
+
++config EARLY_PRINTK
++ bool
++ default y
++
+ config COMPAT
+ bool
+ depends on SPARC64
+diff -Nur linux-3.18.8.orig/arch/sparc/kernel/irq_64.c linux-3.18.8/arch/sparc/kernel/irq_64.c
+--- linux-3.18.8.orig/arch/sparc/kernel/irq_64.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/kernel/irq_64.c 2015-03-03 08:05:17.000000000 +0100
+@@ -849,6 +849,7 @@
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -863,6 +864,7 @@
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff -Nur linux-3.18.8.orig/arch/sparc/kernel/pcr.c linux-3.18.8/arch/sparc/kernel/pcr.c
+--- linux-3.18.8.orig/arch/sparc/kernel/pcr.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/kernel/pcr.c 2015-03-03 08:05:17.000000000 +0100
+@@ -43,10 +43,12 @@
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ set_softint(1 << PIL_DEFERRED_PCR_WORK);
+ }
++#endif
+
+ const struct pcr_ops *pcr_ops;
+ EXPORT_SYMBOL_GPL(pcr_ops);
+diff -Nur linux-3.18.8.orig/arch/sparc/kernel/setup_32.c linux-3.18.8/arch/sparc/kernel/setup_32.c
+--- linux-3.18.8.orig/arch/sparc/kernel/setup_32.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/kernel/setup_32.c 2015-03-03 08:05:17.000000000 +0100
+@@ -309,6 +309,7 @@
+
+ boot_flags_init(*cmdline_p);
+
++ early_console = &prom_early_console;
+ register_console(&prom_early_console);
+
+ printk("ARCH: ");
+diff -Nur linux-3.18.8.orig/arch/sparc/kernel/setup_64.c linux-3.18.8/arch/sparc/kernel/setup_64.c
+--- linux-3.18.8.orig/arch/sparc/kernel/setup_64.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/kernel/setup_64.c 2015-03-03 08:05:17.000000000 +0100
+@@ -563,6 +563,12 @@
+ pause_patch();
+ }
+
++static inline void register_prom_console(void)
++{
++ early_console = &prom_early_console;
++ register_console(&prom_early_console);
++}
++
+ void __init setup_arch(char **cmdline_p)
+ {
+ /* Initialize PROM console and command line. */
+@@ -574,7 +580,7 @@
+ #ifdef CONFIG_EARLYFB
+ if (btext_find_display())
+ #endif
+- register_console(&prom_early_console);
++ register_prom_console();
+
+ if (tlb_type == hypervisor)
+ printk("ARCH: SUN4V\n");
+diff -Nur linux-3.18.8.orig/arch/sparc/mm/fault_32.c linux-3.18.8/arch/sparc/mm/fault_32.c
+--- linux-3.18.8.orig/arch/sparc/mm/fault_32.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/mm/fault_32.c 2015-03-03 08:05:17.000000000 +0100
+@@ -196,7 +196,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+diff -Nur linux-3.18.8.orig/arch/sparc/mm/fault_64.c linux-3.18.8/arch/sparc/mm/fault_64.c
+--- linux-3.18.8.orig/arch/sparc/mm/fault_64.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/sparc/mm/fault_64.c 2015-03-03 08:05:17.000000000 +0100
+@@ -330,7 +330,7 @@
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (!mm || pagefault_disabled())
+ goto intr_or_no_mm;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+diff -Nur linux-3.18.8.orig/arch/tile/mm/fault.c linux-3.18.8/arch/tile/mm/fault.c
+--- linux-3.18.8.orig/arch/tile/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/tile/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -357,7 +357,7 @@
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault.
+ */
+- if (in_atomic() || !mm) {
++ if (!mm || pagefault_disabled()) {
+ vma = NULL; /* happy compiler */
+ goto bad_area_nosemaphore;
+ }
+diff -Nur linux-3.18.8.orig/arch/um/kernel/trap.c linux-3.18.8/arch/um/kernel/trap.c
+--- linux-3.18.8.orig/arch/um/kernel/trap.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/um/kernel/trap.c 2015-03-03 08:05:17.000000000 +0100
+@@ -38,7 +38,7 @@
+ * If the fault was during atomic operation, don't take the fault, just
+ * fail.
+ */
+- if (in_atomic())
++ if (pagefault_disabled())
+ goto out_nosemaphore;
+
+ if (is_user)
+diff -Nur linux-3.18.8.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.8/arch/x86/crypto/aesni-intel_glue.c
+--- linux-3.18.8.orig/arch/x86/crypto/aesni-intel_glue.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/crypto/aesni-intel_glue.c 2015-03-03 08:05:17.000000000 +0100
+@@ -381,14 +381,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+- nbytes & AES_BLOCK_MASK);
++ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -405,14 +405,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -429,14 +429,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -453,14 +453,14 @@
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -512,18 +512,20 @@
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++ kernel_fpu_begin();
+ aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ ctr_crypt_final(ctx, &walk);
++ kernel_fpu_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+diff -Nur linux-3.18.8.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.8/arch/x86/crypto/cast5_avx_glue.c
+--- linux-3.18.8.orig/arch/x86/crypto/cast5_avx_glue.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/crypto/cast5_avx_glue.c 2015-03-03 08:05:17.000000000 +0100
+@@ -60,7 +60,7 @@
+ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ bool enc)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = CAST5_BLOCK_SIZE;
+ unsigned int nbytes;
+@@ -76,7 +76,7 @@
+ u8 *wsrc = walk->src.virt.addr;
+ u8 *wdst = walk->dst.virt.addr;
+
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+
+ /* Process multi-block batch */
+ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
+@@ -104,10 +104,9 @@
+ } while (nbytes >= bsize);
+
+ done:
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -228,7 +227,7 @@
+ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -237,12 +236,11 @@
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes)) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __cbc_decrypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+-
+- cast5_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -312,7 +310,7 @@
+ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes)
+ {
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -321,13 +319,12 @@
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
+- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
++ fpu_enabled = cast5_fpu_begin(false, nbytes);
+ nbytes = __ctr_crypt(desc, &walk);
++ cast5_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- cast5_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ ctr_crypt_final(desc, &walk);
+ err = blkcipher_walk_done(desc, &walk, 0);
+diff -Nur linux-3.18.8.orig/arch/x86/crypto/glue_helper.c linux-3.18.8/arch/x86/crypto/glue_helper.c
+--- linux-3.18.8.orig/arch/x86/crypto/glue_helper.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/crypto/glue_helper.c 2015-03-03 08:05:17.000000000 +0100
+@@ -39,7 +39,7 @@
+ void *ctx = crypto_blkcipher_ctx(desc->tfm);
+ const unsigned int bsize = 128 / 8;
+ unsigned int nbytes, i, func_bytes;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ int err;
+
+ err = blkcipher_walk_virt(desc, walk);
+@@ -49,7 +49,7 @@
+ u8 *wdst = walk->dst.virt.addr;
+
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+
+ for (i = 0; i < gctx->num_funcs; i++) {
+ func_bytes = bsize * gctx->funcs[i].num_blocks;
+@@ -71,10 +71,10 @@
+ }
+
+ done:
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+
+@@ -194,7 +194,7 @@
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -203,12 +203,12 @@
+
+ while ((nbytes = walk.nbytes)) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
+@@ -278,7 +278,7 @@
+ struct scatterlist *src, unsigned int nbytes)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -287,13 +287,12 @@
+
+ while ((nbytes = walk.nbytes) >= bsize) {
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled, nbytes);
++ desc, false, nbytes);
+ nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+- glue_fpu_end(fpu_enabled);
+-
+ if (walk.nbytes) {
+ glue_ctr_crypt_final_128bit(
+ gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
+@@ -348,7 +347,7 @@
+ void *tweak_ctx, void *crypt_ctx)
+ {
+ const unsigned int bsize = 128 / 8;
+- bool fpu_enabled = false;
++ bool fpu_enabled;
+ struct blkcipher_walk walk;
+ int err;
+
+@@ -361,21 +360,21 @@
+
+ /* set minimum length to bsize, for tweak_fn */
+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+- desc, fpu_enabled,
++ desc, false,
+ nbytes < bsize ? bsize : nbytes);
+-
+ /* calculate first value of T */
+ tweak_fn(tweak_ctx, walk.iv, walk.iv);
++ glue_fpu_end(fpu_enabled);
+
+ while (nbytes) {
++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
++ desc, false, nbytes);
+ nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
+
++ glue_fpu_end(fpu_enabled);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ nbytes = walk.nbytes;
+ }
+-
+- glue_fpu_end(fpu_enabled);
+-
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
+diff -Nur linux-3.18.8.orig/arch/x86/include/asm/preempt.h linux-3.18.8/arch/x86/include/asm/preempt.h
+--- linux-3.18.8.orig/arch/x86/include/asm/preempt.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/include/asm/preempt.h 2015-03-03 08:05:17.000000000 +0100
+@@ -85,17 +85,33 @@
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
++
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+ static __always_inline bool should_resched(void)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ return unlikely(!raw_cpu_read_4(__preempt_count) ||
++ test_thread_flag(TIF_NEED_RESCHED_LAZY));
++#else
+ return unlikely(!raw_cpu_read_4(__preempt_count));
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPT
+diff -Nur linux-3.18.8.orig/arch/x86/include/asm/signal.h linux-3.18.8/arch/x86/include/asm/signal.h
+--- linux-3.18.8.orig/arch/x86/include/asm/signal.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/include/asm/signal.h 2015-03-03 08:05:17.000000000 +0100
+@@ -23,6 +23,19 @@
+ unsigned long sig[_NSIG_WORDS];
+ } sigset_t;
+
++/*
++ * Because some traps use the IST stack, we must keep preemption
++ * disabled while calling do_trap(), but do_trap() may call
++ * force_sig_info() which will grab the signal spin_locks for the
++ * task, which in PREEMPT_RT_FULL are mutexes. By defining
++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
++ * trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #ifndef CONFIG_COMPAT
+ typedef sigset_t compat_sigset_t;
+ #endif
+diff -Nur linux-3.18.8.orig/arch/x86/include/asm/stackprotector.h linux-3.18.8/arch/x86/include/asm/stackprotector.h
+--- linux-3.18.8.orig/arch/x86/include/asm/stackprotector.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/include/asm/stackprotector.h 2015-03-03 08:05:17.000000000 +0100
+@@ -57,7 +57,7 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 uninitialized_var(canary);
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -68,8 +68,16 @@
+ * of randomness. The TSC only matters for very early init,
+ * there it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
++ *
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of
++ * it.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = __native_read_tsc();
+ canary += tsc + (tsc << 32UL);
+
+diff -Nur linux-3.18.8.orig/arch/x86/include/asm/thread_info.h linux-3.18.8/arch/x86/include/asm/thread_info.h
+--- linux-3.18.8.orig/arch/x86/include/asm/thread_info.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/include/asm/thread_info.h 2015-03-03 08:05:17.000000000 +0100
+@@ -30,6 +30,8 @@
+ __u32 status; /* thread synchronous flags */
+ __u32 cpu; /* current CPU */
+ int saved_preempt_count;
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+@@ -75,6 +77,7 @@
+ #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SECCOMP 8 /* secure computing */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
+ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
+ #define TIF_UPROBE 12 /* breakpointed or singlestepping */
+@@ -100,6 +103,7 @@
+ #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY)
+ #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
+ #define _TIF_UPROBE (1 << TIF_UPROBE)
+@@ -150,6 +154,8 @@
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define STACK_WARN (THREAD_SIZE/8)
+ #define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
+
+diff -Nur linux-3.18.8.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.8/arch/x86/include/asm/uv/uv_bau.h
+--- linux-3.18.8.orig/arch/x86/include/asm/uv/uv_bau.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/include/asm/uv/uv_bau.h 2015-03-03 08:05:17.000000000 +0100
+@@ -615,9 +615,9 @@
+ cycles_t send_message;
+ cycles_t period_end;
+ cycles_t period_time;
+- spinlock_t uvhub_lock;
+- spinlock_t queue_lock;
+- spinlock_t disable_lock;
++ raw_spinlock_t uvhub_lock;
++ raw_spinlock_t queue_lock;
++ raw_spinlock_t disable_lock;
+ /* tunables */
+ int max_concurr;
+ int max_concurr_const;
+@@ -776,15 +776,15 @@
+ * to be lowered below the current 'v'. atomic_add_unless can only stop
+ * on equal.
+ */
+-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
++static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
+ {
+- spin_lock(lock);
++ raw_spin_lock(lock);
+ if (atomic_read(v) >= u) {
+- spin_unlock(lock);
++ raw_spin_unlock(lock);
+ return 0;
+ }
+ atomic_inc(v);
+- spin_unlock(lock);
++ raw_spin_unlock(lock);
+ return 1;
+ }
+
+diff -Nur linux-3.18.8.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.8/arch/x86/include/asm/uv/uv_hub.h
+--- linux-3.18.8.orig/arch/x86/include/asm/uv/uv_hub.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/include/asm/uv/uv_hub.h 2015-03-03 08:05:17.000000000 +0100
+@@ -492,7 +492,7 @@
+ unsigned short nr_online_cpus;
+ unsigned short pnode;
+ short memory_nid;
+- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
++ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */
+ unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
+ };
+ extern struct uv_blade_info *uv_blade_info;
+diff -Nur linux-3.18.8.orig/arch/x86/Kconfig linux-3.18.8/arch/x86/Kconfig
+--- linux-3.18.8.orig/arch/x86/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/Kconfig 2015-03-03 08:05:17.000000000 +0100
+@@ -21,6 +21,7 @@
+ ### Arch settings
+ config X86
+ def_bool y
++ select HAVE_PREEMPT_LAZY if X86_32
+ select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
+ select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ARCH_HAS_FAST_MULTIPLIER
+@@ -197,8 +198,11 @@
+ def_bool y
+ depends on ISA_DMA_API
+
++config RWSEM_GENERIC_SPINLOCK
++ def_bool PREEMPT_RT_FULL
++
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool y
++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+@@ -811,7 +815,7 @@
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ depends on X86_64 && SMP && DEBUG_KERNEL
+- select CPUMASK_OFFSTACK
++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ ---help---
+ Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ If unsure, say N.
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.8/arch/x86/kernel/apic/io_apic.c
+--- linux-3.18.8.orig/arch/x86/kernel/apic/io_apic.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/apic/io_apic.c 2015-03-03 08:05:17.000000000 +0100
+@@ -2494,7 +2494,8 @@
+ static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+ {
+ /* If we are moving the irq we need to mask it */
+- if (unlikely(irqd_is_setaffinity_pending(data))) {
++ if (unlikely(irqd_is_setaffinity_pending(data) &&
++ !irqd_irq_inprogress(data))) {
+ mask_ioapic(cfg);
+ return true;
+ }
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.8/arch/x86/kernel/apic/x2apic_uv_x.c
+--- linux-3.18.8.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/apic/x2apic_uv_x.c 2015-03-03 08:05:17.000000000 +0100
+@@ -918,7 +918,7 @@
+ uv_blade_info[blade].pnode = pnode;
+ uv_blade_info[blade].nr_possible_cpus = 0;
+ uv_blade_info[blade].nr_online_cpus = 0;
+- spin_lock_init(&uv_blade_info[blade].nmi_lock);
++ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
+ min_pnode = min(pnode, min_pnode);
+ max_pnode = max(pnode, max_pnode);
+ blade++;
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/asm-offsets.c linux-3.18.8/arch/x86/kernel/asm-offsets.c
+--- linux-3.18.8.orig/arch/x86/kernel/asm-offsets.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/asm-offsets.c 2015-03-03 08:05:17.000000000 +0100
+@@ -32,6 +32,7 @@
+ OFFSET(TI_flags, thread_info, flags);
+ OFFSET(TI_status, thread_info, status);
+ OFFSET(TI_addr_limit, thread_info, addr_limit);
++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
+
+ BLANK();
+ OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+@@ -71,4 +72,5 @@
+
+ BLANK();
+ DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
+ }
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.8/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-3.18.8.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/cpu/mcheck/mce.c 2015-03-03 08:05:17.000000000 +0100
+@@ -18,6 +18,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/kobject.h>
+ #include <linux/uaccess.h>
++#include <linux/kthread.h>
+ #include <linux/kdebug.h>
+ #include <linux/kernel.h>
+ #include <linux/percpu.h>
+@@ -41,6 +42,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+@@ -1266,7 +1268,7 @@
+ static unsigned long check_interval = 5 * 60; /* 5 minutes */
+
+ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+
+ static unsigned long mce_adjust_timer_default(unsigned long interval)
+ {
+@@ -1283,14 +1285,11 @@
+ return test_and_clear_bit(0, v);
+ }
+
+-static void mce_timer_fn(unsigned long data)
++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+ unsigned long iv;
+ int notify;
+
+- WARN_ON(smp_processor_id() != data);
+-
+ if (mce_available(this_cpu_ptr(&cpu_info))) {
+ machine_check_poll(MCP_TIMESTAMP,
+ this_cpu_ptr(&mce_poll_banks));
+@@ -1313,9 +1312,11 @@
+ __this_cpu_write(mce_next_interval, iv);
+ /* Might have become 0 after CMCI storm subsided */
+ if (iv) {
+- t->expires = jiffies + iv;
+- add_timer_on(t, smp_processor_id());
++ hrtimer_forward_now(timer, ns_to_ktime(
++ jiffies_to_usecs(iv) * 1000ULL));
++ return HRTIMER_RESTART;
+ }
++ return HRTIMER_NORESTART;
+ }
+
+ /*
+@@ -1323,28 +1324,37 @@
+ */
+ void mce_timer_kick(unsigned long interval)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- unsigned long when = jiffies + interval;
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ unsigned long iv = __this_cpu_read(mce_next_interval);
+
+- if (timer_pending(t)) {
+- if (time_before(when, t->expires))
+- mod_timer_pinned(t, when);
++ if (hrtimer_active(t)) {
++ s64 exp;
++ s64 intv_us;
++
++ intv_us = jiffies_to_usecs(interval);
++ exp = ktime_to_us(hrtimer_expires_remaining(t));
++ if (intv_us < exp) {
++ hrtimer_cancel(t);
++ hrtimer_start_range_ns(t,
++ ns_to_ktime(intv_us * 1000),
++ 0, HRTIMER_MODE_REL_PINNED);
++ }
+ } else {
+- t->expires = round_jiffies(when);
+- add_timer_on(t, smp_processor_id());
++ hrtimer_start_range_ns(t,
++ ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL),
++ 0, HRTIMER_MODE_REL_PINNED);
+ }
+ if (interval < iv)
+ __this_cpu_write(mce_next_interval, interval);
+ }
+
+-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
+ static void mce_timer_delete_all(void)
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+- del_timer_sync(&per_cpu(mce_timer, cpu));
++ hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1354,6 +1364,68 @@
+
+ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
++static void __mce_notify_work(void)
++{
++ /* Not more than two messages every minute */
++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
++
++ /* wake processes polling /dev/mcelog */
++ wake_up_interruptible(&mce_chrdev_wait);
++
++ /*
++ * There is no risk of missing notifications because
++ * work_pending is always cleared before the function is
++ * executed.
++ */
++ if (mce_helper[0] && !work_pending(&mce_trigger_work))
++ schedule_work(&mce_trigger_work);
++
++ if (__ratelimit(&ratelimit))
++ pr_info(HW_ERR "Machine check events logged\n");
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++struct task_struct *mce_notify_helper;
++
++static int mce_notify_helper_thread(void *unused)
++{
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ if (kthread_should_stop())
++ break;
++ __mce_notify_work();
++ }
++ return 0;
++}
++
++static int mce_notify_work_init(void)
++{
++ mce_notify_helper = kthread_run(mce_notify_helper_thread, NULL,
++ "mce-notify");
++ if (IS_ERR(mce_notify_helper))
++ return PTR_ERR(mce_notify_helper);
++
++ return 0;
++}
++
++static void mce_notify_work(void)
++{
++ if (WARN_ON_ONCE(!mce_notify_helper)) {
++ pr_info(HW_ERR "Machine check event before MCE init; ignored\n");
++ return;
++ }
++
++ wake_up_process(mce_notify_helper);
++}
++#else
++static void mce_notify_work(void)
++{
++ __mce_notify_work();
++}
++static inline int mce_notify_work_init(void) { return 0; }
++#endif
++
+ /*
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+@@ -1361,19 +1433,8 @@
+ */
+ int mce_notify_irq(void)
+ {
+- /* Not more than two messages every minute */
+- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+-
+ if (test_and_clear_bit(0, &mce_need_notify)) {
+- /* wake processes polling /dev/mcelog */
+- wake_up_interruptible(&mce_chrdev_wait);
+-
+- if (mce_helper[0])
+- schedule_work(&mce_trigger_work);
+-
+- if (__ratelimit(&ratelimit))
+- pr_info(HW_ERR "Machine check events logged\n");
+-
++ mce_notify_work();
+ return 1;
+ }
+ return 0;
+@@ -1644,7 +1705,7 @@
+ }
+ }
+
+-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
++static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+ {
+ unsigned long iv = check_interval * HZ;
+
+@@ -1653,16 +1714,17 @@
+
+ per_cpu(mce_next_interval, cpu) = iv;
+
+- t->expires = round_jiffies(jiffies + iv);
+- add_timer_on(t, cpu);
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++ 0, HRTIMER_MODE_REL_PINNED);
+ }
+
+ static void __mcheck_cpu_init_timer(void)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ unsigned int cpu = smp_processor_id();
+
+- setup_timer(t, mce_timer_fn, cpu);
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
+ mce_start_timer(cpu, t);
+ }
+
+@@ -2339,6 +2401,8 @@
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
+ return;
+
++ hrtimer_cancel(this_cpu_ptr(&mce_timer));
++
+ if (!(action & CPU_TASKS_FROZEN))
+ cmci_clear();
+ for (i = 0; i < mca_cfg.banks; i++) {
+@@ -2365,6 +2429,7 @@
+ if (b->init)
+ wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ }
++ __mcheck_cpu_init_timer();
+ }
+
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2372,7 +2437,6 @@
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+- struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+@@ -2392,11 +2456,9 @@
+ break;
+ case CPU_DOWN_PREPARE:
+ smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+- del_timer_sync(t);
+ break;
+ case CPU_DOWN_FAILED:
+ smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+- mce_start_timer(cpu, t);
+ break;
+ }
+
+@@ -2471,8 +2533,15 @@
+ if (err)
+ goto err_register;
+
++ err = mce_notify_work_init();
++ if (err)
++ goto err_notify;
++
+ return 0;
+
++err_notify:
++ misc_deregister(&mce_chrdev_device);
++
+ err_register:
+ unregister_syscore_ops(&mce_syscore_ops);
+
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/entry_32.S linux-3.18.8/arch/x86/kernel/entry_32.S
+--- linux-3.18.8.orig/arch/x86/kernel/entry_32.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/entry_32.S 2015-03-03 08:05:17.000000000 +0100
+@@ -359,8 +359,24 @@
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ need_resched:
++ # preempt count == 0 + NEED_RESCHED set?
+ cmpl $0,PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all
++#else
++ jz test_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all
++
++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++
++ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
++ jz restore_all
++test_int_off:
++#endif
+ testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+@@ -591,7 +607,7 @@
+ ALIGN
+ RING0_PTREGS_FRAME # can't unwind into user space anyway
+ work_pending:
+- testb $_TIF_NEED_RESCHED, %cl
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jz work_notifysig
+ work_resched:
+ call schedule
+@@ -604,7 +620,7 @@
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+ jz restore_all
+- testb $_TIF_NEED_RESCHED, %cl
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/entry_64.S linux-3.18.8/arch/x86/kernel/entry_64.S
+--- linux-3.18.8.orig/arch/x86/kernel/entry_64.S 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/entry_64.S 2015-03-03 08:05:17.000000000 +0100
+@@ -451,8 +451,8 @@
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+ sysret_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc sysret_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz sysret_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -551,8 +551,8 @@
+ /* First do a reschedule test. */
+ /* edx: work, edi: workmask */
+ int_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc int_very_careful
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz int_very_careful
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -867,8 +867,8 @@
+ /* edi: workmask, edx: work */
+ retint_careful:
+ CFI_RESTORE_STATE
+- bt $TIF_NEED_RESCHED,%edx
+- jnc retint_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz retint_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -900,7 +900,22 @@
+ /* rcx: threadinfo. interrupts off. */
+ ENTRY(retint_kernel)
+ cmpl $0,PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz retint_restore_args
++#else
++ jz check_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz retint_restore_args
++
++ cmpl $0, TI_preempt_lazy_count(%rcx)
++ jnz retint_restore_args
++
++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
++ jnc retint_restore_args
++check_int_off:
++#endif
+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ jnc retint_restore_args
+ call preempt_schedule_irq
+@@ -1116,6 +1131,7 @@
+ jmp 2b
+ .previous
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(do_softirq_own_stack)
+ CFI_STARTPROC
+@@ -1135,6 +1151,7 @@
+ ret
+ CFI_ENDPROC
+ END(do_softirq_own_stack)
++#endif
+
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+@@ -1299,7 +1316,7 @@
+ movq %rsp,%rdi /* &pt_regs */
+ call sync_regs
+ movq %rax,%rsp /* switch stack for scheduling */
+- testl $_TIF_NEED_RESCHED,%ebx
++ testl $_TIF_NEED_RESCHED_MASK,%ebx
+ jnz paranoid_schedule
+ movl %ebx,%edx /* arg3: thread flags */
+ TRACE_IRQS_ON
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/irq_32.c linux-3.18.8/arch/x86/kernel/irq_32.c
+--- linux-3.18.8.orig/arch/x86/kernel/irq_32.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/irq_32.c 2015-03-03 08:05:17.000000000 +0100
+@@ -142,6 +142,7 @@
+ cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curstk;
+@@ -160,6 +161,7 @@
+
+ call_on_stack(__do_softirq, isp);
+ }
++#endif
+
+ bool handle_irq(unsigned irq, struct pt_regs *regs)
+ {
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/irq_work.c linux-3.18.8/arch/x86/kernel/irq_work.c
+--- linux-3.18.8.orig/arch/x86/kernel/irq_work.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/irq_work.c 2015-03-03 08:05:17.000000000 +0100
+@@ -38,6 +38,7 @@
+ exiting_irq();
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -48,3 +49,4 @@
+ apic_wait_icr_idle();
+ #endif
+ }
++#endif
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/process_32.c linux-3.18.8/arch/x86/kernel/process_32.c
+--- linux-3.18.8.orig/arch/x86/kernel/process_32.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/process_32.c 2015-03-03 08:05:17.000000000 +0100
+@@ -35,6 +35,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
++#include <linux/highmem.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/ldt.h>
+@@ -214,6 +215,35 @@
+ }
+ EXPORT_SYMBOL_GPL(start_thread);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ int i;
++
++ /*
++ * Clear @prev's kmap_atomic mappings
++ */
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++ pte_t *ptep = kmap_pte - idx;
++
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
++ /*
++ * Restore @next_p's kmap_atomic mappings
++ */
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ if (!pte_none(next_p->kmap_pte[i]))
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++}
++#else
++static inline void
++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
++#endif
++
+
+ /*
+ * switch_to(x,y) should switch tasks from x to y.
+@@ -301,6 +331,8 @@
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
++ switch_kmaps(prev_p, next_p);
++
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/signal.c linux-3.18.8/arch/x86/kernel/signal.c
+--- linux-3.18.8.orig/arch/x86/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/signal.c 2015-03-03 08:05:17.000000000 +0100
+@@ -746,6 +746,14 @@
+ mce_notify_process();
+ #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++ t->forced_info.si_signo = 0;
++ }
++#endif
++
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+diff -Nur linux-3.18.8.orig/arch/x86/kernel/traps.c linux-3.18.8/arch/x86/kernel/traps.c
+--- linux-3.18.8.orig/arch/x86/kernel/traps.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kernel/traps.c 2015-03-03 08:05:17.000000000 +0100
+@@ -87,9 +87,21 @@
+ local_irq_enable();
+ }
+
+-static inline void preempt_conditional_sti(struct pt_regs *regs)
++static inline void conditional_sti_ist(struct pt_regs *regs)
+ {
++#ifdef CONFIG_X86_64
++ /*
++ * X86_64 uses a per CPU stack on the IST for certain traps
++ * like int3. The task can not be preempted when using one
++ * of these stacks, thus preemption must be disabled, otherwise
++ * the stack can be corrupted if the task is scheduled out,
++ * and another task comes in and uses this stack.
++ *
++ * On x86_32 the task keeps its own stack and it is OK if the
++ * task schedules out.
++ */
+ preempt_count_inc();
++#endif
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_enable();
+ }
+@@ -100,11 +112,13 @@
+ local_irq_disable();
+ }
+
+-static inline void preempt_conditional_cli(struct pt_regs *regs)
++static inline void conditional_cli_ist(struct pt_regs *regs)
+ {
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_disable();
++#ifdef CONFIG_X86_64
+ preempt_count_dec();
++#endif
+ }
+
+ static nokprobe_inline int
+@@ -372,9 +386,9 @@
+ * as we may switch to the interrupt stack.
+ */
+ debug_stack_usage_inc();
+- preempt_conditional_sti(regs);
++ conditional_sti_ist(regs);
+ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ debug_stack_usage_dec();
+ exit:
+ exception_exit(prev_state);
+@@ -517,12 +531,12 @@
+ debug_stack_usage_inc();
+
+ /* It's safe to allow irq's after DR6 has been saved */
+- preempt_conditional_sti(regs);
++ conditional_sti_ist(regs);
+
+ if (regs->flags & X86_VM_MASK) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+ X86_TRAP_DB);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ debug_stack_usage_dec();
+ goto exit;
+ }
+@@ -542,7 +556,7 @@
+ si_code = get_si_code(tsk->thread.debugreg6);
+ if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
+ send_sigtrap(tsk, regs, error_code, si_code);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ debug_stack_usage_dec();
+
+ exit:
+diff -Nur linux-3.18.8.orig/arch/x86/kvm/x86.c linux-3.18.8/arch/x86/kvm/x86.c
+--- linux-3.18.8.orig/arch/x86/kvm/x86.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/kvm/x86.c 2015-03-03 08:05:17.000000000 +0100
+@@ -5772,6 +5772,13 @@
+ goto out;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out_free_percpu;
+diff -Nur linux-3.18.8.orig/arch/x86/mm/fault.c linux-3.18.8/arch/x86/mm/fault.c
+--- linux-3.18.8.orig/arch/x86/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -1128,7 +1128,7 @@
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+diff -Nur linux-3.18.8.orig/arch/x86/mm/highmem_32.c linux-3.18.8/arch/x86/mm/highmem_32.c
+--- linux-3.18.8.orig/arch/x86/mm/highmem_32.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/mm/highmem_32.c 2015-03-03 08:05:17.000000000 +0100
+@@ -32,6 +32,7 @@
+ */
+ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
++ pte_t pte = mk_pte(page, prot);
+ unsigned long vaddr;
+ int idx, type;
+
+@@ -45,7 +46,10 @@
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_pte(kmap_pte-idx, pte);
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -88,6 +92,9 @@
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
+diff -Nur linux-3.18.8.orig/arch/x86/mm/iomap_32.c linux-3.18.8/arch/x86/mm/iomap_32.c
+--- linux-3.18.8.orig/arch/x86/mm/iomap_32.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/mm/iomap_32.c 2015-03-03 08:05:17.000000000 +0100
+@@ -56,6 +56,7 @@
+
+ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+ {
++ pte_t pte = pfn_pte(pfn, prot);
+ unsigned long vaddr;
+ int idx, type;
+
+@@ -64,7 +65,12 @@
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
++ WARN_ON(!pte_none(*(kmap_pte - idx)));
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = pte;
++#endif
++ set_pte(kmap_pte - idx, pte);
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+@@ -110,6 +116,9 @@
+ * is a bad idea also, in case the page changes cacheability
+ * attributes or becomes a protected page in a hypervisor.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ current->kmap_pte[type] = __pte(0);
++#endif
+ kpte_clear_flush(kmap_pte-idx, vaddr);
+ kmap_atomic_idx_pop();
+ }
+diff -Nur linux-3.18.8.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.8/arch/x86/platform/uv/tlb_uv.c
+--- linux-3.18.8.orig/arch/x86/platform/uv/tlb_uv.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/platform/uv/tlb_uv.c 2015-03-03 08:05:17.000000000 +0100
+@@ -714,9 +714,9 @@
+
+ quiesce_local_uvhub(hmaster);
+
+- spin_lock(&hmaster->queue_lock);
++ raw_spin_lock(&hmaster->queue_lock);
+ reset_with_ipi(&bau_desc->distribution, bcp);
+- spin_unlock(&hmaster->queue_lock);
++ raw_spin_unlock(&hmaster->queue_lock);
+
+ end_uvhub_quiesce(hmaster);
+
+@@ -736,9 +736,9 @@
+
+ quiesce_local_uvhub(hmaster);
+
+- spin_lock(&hmaster->queue_lock);
++ raw_spin_lock(&hmaster->queue_lock);
+ reset_with_ipi(&bau_desc->distribution, bcp);
+- spin_unlock(&hmaster->queue_lock);
++ raw_spin_unlock(&hmaster->queue_lock);
+
+ end_uvhub_quiesce(hmaster);
+
+@@ -759,7 +759,7 @@
+ cycles_t tm1;
+
+ hmaster = bcp->uvhub_master;
+- spin_lock(&hmaster->disable_lock);
++ raw_spin_lock(&hmaster->disable_lock);
+ if (!bcp->baudisabled) {
+ stat->s_bau_disabled++;
+ tm1 = get_cycles();
+@@ -772,7 +772,7 @@
+ }
+ }
+ }
+- spin_unlock(&hmaster->disable_lock);
++ raw_spin_unlock(&hmaster->disable_lock);
+ }
+
+ static void count_max_concurr(int stat, struct bau_control *bcp,
+@@ -835,7 +835,7 @@
+ */
+ static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
+ {
+- spinlock_t *lock = &hmaster->uvhub_lock;
++ raw_spinlock_t *lock = &hmaster->uvhub_lock;
+ atomic_t *v;
+
+ v = &hmaster->active_descriptor_count;
+@@ -968,7 +968,7 @@
+ struct bau_control *hmaster;
+
+ hmaster = bcp->uvhub_master;
+- spin_lock(&hmaster->disable_lock);
++ raw_spin_lock(&hmaster->disable_lock);
+ if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
+ stat->s_bau_reenabled++;
+ for_each_present_cpu(tcpu) {
+@@ -980,10 +980,10 @@
+ tbcp->period_giveups = 0;
+ }
+ }
+- spin_unlock(&hmaster->disable_lock);
++ raw_spin_unlock(&hmaster->disable_lock);
+ return 0;
+ }
+- spin_unlock(&hmaster->disable_lock);
++ raw_spin_unlock(&hmaster->disable_lock);
+ return -1;
+ }
+
+@@ -1899,9 +1899,9 @@
+ bcp->cong_reps = congested_reps;
+ bcp->disabled_period = sec_2_cycles(disabled_period);
+ bcp->giveup_limit = giveup_limit;
+- spin_lock_init(&bcp->queue_lock);
+- spin_lock_init(&bcp->uvhub_lock);
+- spin_lock_init(&bcp->disable_lock);
++ raw_spin_lock_init(&bcp->queue_lock);
++ raw_spin_lock_init(&bcp->uvhub_lock);
++ raw_spin_lock_init(&bcp->disable_lock);
+ }
+ }
+
+diff -Nur linux-3.18.8.orig/arch/x86/platform/uv/uv_time.c linux-3.18.8/arch/x86/platform/uv/uv_time.c
+--- linux-3.18.8.orig/arch/x86/platform/uv/uv_time.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/x86/platform/uv/uv_time.c 2015-03-03 08:05:17.000000000 +0100
+@@ -58,7 +58,7 @@
+
+ /* There is one of these allocated per node */
+ struct uv_rtc_timer_head {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ /* next cpu waiting for timer, local node relative: */
+ int next_cpu;
+ /* number of cpus on this node: */
+@@ -178,7 +178,7 @@
+ uv_rtc_deallocate_timers();
+ return -ENOMEM;
+ }
+- spin_lock_init(&head->lock);
++ raw_spin_lock_init(&head->lock);
+ head->ncpus = uv_blade_nr_possible_cpus(bid);
+ head->next_cpu = -1;
+ blade_info[bid] = head;
+@@ -232,7 +232,7 @@
+ unsigned long flags;
+ int next_cpu;
+
+- spin_lock_irqsave(&head->lock, flags);
++ raw_spin_lock_irqsave(&head->lock, flags);
+
+ next_cpu = head->next_cpu;
+ *t = expires;
+@@ -244,12 +244,12 @@
+ if (uv_setup_intr(cpu, expires)) {
+ *t = ULLONG_MAX;
+ uv_rtc_find_next_timer(head, pnode);
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return -ETIME;
+ }
+ }
+
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return 0;
+ }
+
+@@ -268,7 +268,7 @@
+ unsigned long flags;
+ int rc = 0;
+
+- spin_lock_irqsave(&head->lock, flags);
++ raw_spin_lock_irqsave(&head->lock, flags);
+
+ if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+ rc = 1;
+@@ -280,7 +280,7 @@
+ uv_rtc_find_next_timer(head, pnode);
+ }
+
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+
+ return rc;
+ }
+@@ -300,13 +300,18 @@
+ static cycle_t uv_read_rtc(struct clocksource *cs)
+ {
+ unsigned long offset;
++ cycle_t cycles;
+
++ preempt_disable();
+ if (uv_get_min_hub_revision_id() == 1)
+ offset = 0;
+ else
+ offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
+
+- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++ preempt_enable();
++
++ return cycles;
+ }
+
+ /*
+diff -Nur linux-3.18.8.orig/arch/xtensa/mm/fault.c linux-3.18.8/arch/xtensa/mm/fault.c
+--- linux-3.18.8.orig/arch/xtensa/mm/fault.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/arch/xtensa/mm/fault.c 2015-03-03 08:05:17.000000000 +0100
+@@ -57,7 +57,7 @@
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm) {
++ if (!mm || pagefault_disabled()) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
+diff -Nur linux-3.18.8.orig/block/blk-core.c linux-3.18.8/block/blk-core.c
+--- linux-3.18.8.orig/block/blk-core.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-core.c 2015-03-03 08:05:17.000000000 +0100
+@@ -100,6 +100,9 @@
+
+ INIT_LIST_HEAD(&rq->queuelist);
+ INIT_LIST_HEAD(&rq->timeout_list);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ rq->cpu = -1;
+ rq->q = q;
+ rq->__sector = (sector_t) -1;
+@@ -194,7 +197,7 @@
+ **/
+ void blk_start_queue(struct request_queue *q)
+ {
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+@@ -627,7 +630,7 @@
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+- init_waitqueue_head(&q->mq_freeze_wq);
++ init_swait_head(&q->mq_freeze_wq);
+
+ if (blkcg_init_queue(q))
+ goto fail_bdi;
+@@ -3037,7 +3040,7 @@
+ blk_run_queue_async(q);
+ else
+ __blk_run_queue(q);
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ }
+
+ static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
+@@ -3085,7 +3088,6 @@
+ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+ struct request_queue *q;
+- unsigned long flags;
+ struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
+@@ -3105,11 +3107,6 @@
+ q = NULL;
+ depth = 0;
+
+- /*
+- * Save and disable interrupts here, to avoid doing it for every
+- * queue lock we have to take.
+- */
+- local_irq_save(flags);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+@@ -3122,7 +3119,7 @@
+ queue_unplugged(q, depth, from_schedule);
+ q = rq->q;
+ depth = 0;
+- spin_lock(q->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ }
+
+ /*
+@@ -3149,8 +3146,6 @@
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+-
+- local_irq_restore(flags);
+ }
+
+ void blk_finish_plug(struct blk_plug *plug)
+diff -Nur linux-3.18.8.orig/block/blk-ioc.c linux-3.18.8/block/blk-ioc.c
+--- linux-3.18.8.orig/block/blk-ioc.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-ioc.c 2015-03-03 08:05:17.000000000 +0100
+@@ -7,6 +7,7 @@
+ #include <linux/bio.h>
+ #include <linux/blkdev.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+
+ #include "blk.h"
+
+@@ -109,7 +110,7 @@
+ spin_unlock(q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+ }
+ }
+@@ -187,7 +188,7 @@
+ spin_unlock(icq->q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ goto retry;
+ }
+ }
+diff -Nur linux-3.18.8.orig/block/blk-iopoll.c linux-3.18.8/block/blk-iopoll.c
+--- linux-3.18.8.orig/block/blk-iopoll.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-iopoll.c 2015-03-03 08:05:17.000000000 +0100
+@@ -35,6 +35,7 @@
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(blk_iopoll_sched);
+
+@@ -132,6 +133,7 @@
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -201,6 +203,7 @@
+ this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+diff -Nur linux-3.18.8.orig/block/blk-mq.c linux-3.18.8/block/blk-mq.c
+--- linux-3.18.8.orig/block/blk-mq.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-mq.c 2015-03-03 08:05:17.000000000 +0100
+@@ -85,7 +85,7 @@
+ if (percpu_ref_tryget_live(&q->mq_usage_counter))
+ return 0;
+
+- ret = wait_event_interruptible(q->mq_freeze_wq,
++ ret = swait_event_interruptible(q->mq_freeze_wq,
+ !q->mq_freeze_depth || blk_queue_dying(q));
+ if (blk_queue_dying(q))
+ return -ENODEV;
+@@ -104,7 +104,7 @@
+ struct request_queue *q =
+ container_of(ref, struct request_queue, mq_usage_counter);
+
+- wake_up_all(&q->mq_freeze_wq);
++ swait_wake_all(&q->mq_freeze_wq);
+ }
+
+ static void blk_mq_freeze_queue_start(struct request_queue *q)
+@@ -123,7 +123,7 @@
+
+ static void blk_mq_freeze_queue_wait(struct request_queue *q)
+ {
+- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
++ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+ }
+
+ /*
+@@ -146,7 +146,7 @@
+ spin_unlock_irq(q->queue_lock);
+ if (wake) {
+ percpu_ref_reinit(&q->mq_usage_counter);
+- wake_up_all(&q->mq_freeze_wq);
++ swait_wake_all(&q->mq_freeze_wq);
+ }
+ }
+
+@@ -194,6 +194,9 @@
+ rq->resid_len = 0;
+ rq->sense = NULL;
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->timeout = 0;
+
+@@ -313,6 +316,17 @@
+ }
+ EXPORT_SYMBOL(blk_mq_end_request);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++void __blk_mq_complete_request_remote_work(struct work_struct *work)
++{
++ struct request *rq = container_of(work, struct request, work);
++
++ rq->q->softirq_done_fn(rq);
++}
++
++#else
++
+ static void __blk_mq_complete_request_remote(void *data)
+ {
+ struct request *rq = data;
+@@ -320,6 +334,8 @@
+ rq->q->softirq_done_fn(rq);
+ }
+
++#endif
++
+ static void blk_mq_ipi_complete_request(struct request *rq)
+ {
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+@@ -331,19 +347,23 @@
+ return;
+ }
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ shared = cpus_share_cache(cpu, ctx->cpu);
+
+ if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ schedule_work_on(ctx->cpu, &rq->work);
++#else
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
++#endif
+ } else {
+ rq->q->softirq_done_fn(rq);
+ }
+- put_cpu();
++ put_cpu_light();
+ }
+
+ void __blk_mq_complete_request(struct request *rq)
+@@ -814,9 +834,9 @@
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+ continue;
+
+- preempt_disable();
++ migrate_disable();
+ blk_mq_run_hw_queue(hctx, async);
+- preempt_enable();
++ migrate_enable();
+ }
+ }
+ EXPORT_SYMBOL(blk_mq_run_queues);
+@@ -843,9 +863,9 @@
+ {
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+
+- preempt_disable();
++ migrate_disable();
+ blk_mq_run_hw_queue(hctx, false);
+- preempt_enable();
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(blk_mq_start_hw_queue);
+
+@@ -870,9 +890,9 @@
+ continue;
+
+ clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+- preempt_disable();
++ migrate_disable();
+ blk_mq_run_hw_queue(hctx, async);
+- preempt_enable();
++ migrate_enable();
+ }
+ }
+ EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
+@@ -1494,7 +1514,7 @@
+ {
+ struct blk_mq_hw_ctx *hctx = data;
+
+- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
++ if (action == CPU_POST_DEAD)
+ return blk_mq_hctx_cpu_offline(hctx, cpu);
+ else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+ return blk_mq_hctx_cpu_online(hctx, cpu);
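
The blk-mq hunks above replace the IPI-based remote completion with a per-request work item on PREEMPT_RT_FULL: smp_call_function_single_async() runs its callback in hard-irq context, where softirq_done_fn() may take sleeping locks on an RT kernel, so the completion is pushed to a workqueue on the target CPU instead. A condensed sketch of the resulting dispatch, assuming the struct work_struct member "work" that this patch adds to struct request:

    /* Sketch only; rq->work is a member this patch adds to struct request. */
    #include <linux/blkdev.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    static void complete_on_cpu(struct request *rq, int cpu)
    {
    #ifdef CONFIG_PREEMPT_RT_FULL
            /* workqueue context may sleep, so rtmutex-based locks are safe */
            schedule_work_on(cpu, &rq->work);
    #else
            rq->csd.func = __blk_mq_complete_request_remote;
            rq->csd.info = rq;
            rq->csd.flags = 0;
            smp_call_function_single_async(cpu, &rq->csd);
    #endif
    }
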
+diff -Nur linux-3.18.8.orig/block/blk-mq-cpu.c linux-3.18.8/block/blk-mq-cpu.c
+--- linux-3.18.8.orig/block/blk-mq-cpu.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-mq-cpu.c 2015-03-03 08:05:17.000000000 +0100
+@@ -16,7 +16,7 @@
+ #include "blk-mq.h"
+
+ static LIST_HEAD(blk_mq_cpu_notify_list);
+-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
++static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+
+ static int blk_mq_main_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+@@ -25,7 +25,10 @@
+ struct blk_mq_cpu_notifier *notify;
+ int ret = NOTIFY_OK;
+
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ if (action != CPU_POST_DEAD)
++ return NOTIFY_OK;
++
++ spin_lock(&blk_mq_cpu_notify_lock);
+
+ list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
+ ret = notify->notify(notify->data, action, cpu);
+@@ -33,7 +36,7 @@
+ break;
+ }
+
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ return ret;
+ }
+
+@@ -41,16 +44,16 @@
+ {
+ BUG_ON(!notifier->notify);
+
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ spin_lock(&blk_mq_cpu_notify_lock);
+ list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ }
+
+ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
+ {
+- raw_spin_lock(&blk_mq_cpu_notify_lock);
++ spin_lock(&blk_mq_cpu_notify_lock);
+ list_del(&notifier->list);
+- raw_spin_unlock(&blk_mq_cpu_notify_lock);
++ spin_unlock(&blk_mq_cpu_notify_lock);
+ }
+
+ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
+diff -Nur linux-3.18.8.orig/block/blk-mq.h linux-3.18.8/block/blk-mq.h
+--- linux-3.18.8.orig/block/blk-mq.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-mq.h 2015-03-03 08:05:17.000000000 +0100
+@@ -73,7 +73,10 @@
+ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
+ unsigned int cpu)
+ {
+- return per_cpu_ptr(q->queue_ctx, cpu);
++ struct blk_mq_ctx *ctx;
++
++ ctx = per_cpu_ptr(q->queue_ctx, cpu);
++ return ctx;
+ }
+
+ /*
+@@ -84,12 +87,12 @@
+ */
+ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
+ {
+- return __blk_mq_get_ctx(q, get_cpu());
++ return __blk_mq_get_ctx(q, get_cpu_light());
+ }
+
+ static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+ {
+- put_cpu();
++ put_cpu_light();
+ }
+
+ struct blk_mq_alloc_data {
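
get_cpu_light()/put_cpu_light() are the RT counterparts of get_cpu()/put_cpu(): instead of disabling preemption they merely pin the task to its current CPU with migrate_disable(), leaving the section preemptible so sleeping spinlocks may be taken inside it. Roughly, as defined elsewhere in this patch (a sketch, not the verbatim definition):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define get_cpu_light()   ({ migrate_disable(); smp_processor_id(); })
    # define put_cpu_light()   migrate_enable()
    #else
    # define get_cpu_light()   get_cpu()    /* preempt_disable() + CPU id */
    # define put_cpu_light()   put_cpu()
    #endif
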
+diff -Nur linux-3.18.8.orig/block/blk-softirq.c linux-3.18.8/block/blk-softirq.c
+--- linux-3.18.8.orig/block/blk-softirq.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/blk-softirq.c 2015-03-03 08:05:17.000000000 +0100
+@@ -51,6 +51,7 @@
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /*
+@@ -93,6 +94,7 @@
+ this_cpu_ptr(&blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+@@ -150,6 +152,7 @@
+ goto do_local;
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /**
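
preempt_check_resched_rt() is added after the irq-enable points because raise_softirq_irqoff() may have woken the softirq thread on RT, and that wakeup cannot be acted upon while interrupts are still off. On non-RT configurations it compiles to nothing. A sketch of the helper introduced elsewhere in this patch:

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_check_resched_rt()    preempt_check_resched()
    #else
    # define preempt_check_resched_rt()    barrier()
    #endif
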
+diff -Nur linux-3.18.8.orig/block/bounce.c linux-3.18.8/block/bounce.c
+--- linux-3.18.8.orig/block/bounce.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/block/bounce.c 2015-03-03 08:05:17.000000000 +0100
+@@ -54,11 +54,11 @@
+ unsigned long flags;
+ unsigned char *vto;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ vto = kmap_atomic(to->bv_page);
+ memcpy(vto + to->bv_offset, vfrom, to->bv_len);
+ kunmap_atomic(vto);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ #else /* CONFIG_HIGHMEM */
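
The _nort ("no RT") irq helpers used above behave like the plain local_irq_* operations on mainline but degrade to no-ops on PREEMPT_RT_FULL, where kmap_atomic() and the copy are preemptible and need no irq-off protection. A sketch of the intent (the exact definitions live elsewhere in this patch):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_save_nort(flags)    local_save_flags(flags)
    # define local_irq_restore_nort(flags) ((void)(flags))
    # define local_irq_disable_nort()      barrier()
    #else
    # define local_irq_save_nort(flags)    local_irq_save(flags)
    # define local_irq_restore_nort(flags) local_irq_restore(flags)
    # define local_irq_disable_nort()      local_irq_disable()
    #endif
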
+diff -Nur linux-3.18.8.orig/crypto/algapi.c linux-3.18.8/crypto/algapi.c
+--- linux-3.18.8.orig/crypto/algapi.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/crypto/algapi.c 2015-03-03 08:05:17.000000000 +0100
+@@ -698,13 +698,13 @@
+
+ int crypto_register_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_register(&crypto_chain, nb);
++ return srcu_notifier_chain_register(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_register_notifier);
+
+ int crypto_unregister_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_unregister(&crypto_chain, nb);
++ return srcu_notifier_chain_unregister(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+
+diff -Nur linux-3.18.8.orig/crypto/api.c linux-3.18.8/crypto/api.c
+--- linux-3.18.8.orig/crypto/api.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/crypto/api.c 2015-03-03 08:05:17.000000000 +0100
+@@ -31,7 +31,7 @@
+ DECLARE_RWSEM(crypto_alg_sem);
+ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+
+-BLOCKING_NOTIFIER_HEAD(crypto_chain);
++SRCU_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
+ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+@@ -236,10 +236,10 @@
+ {
+ int ok;
+
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ if (ok == NOTIFY_DONE) {
+ request_module("cryptomgr");
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ return ok;
+diff -Nur linux-3.18.8.orig/crypto/internal.h linux-3.18.8/crypto/internal.h
+--- linux-3.18.8.orig/crypto/internal.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/crypto/internal.h 2015-03-03 08:05:17.000000000 +0100
+@@ -48,7 +48,7 @@
+
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct blocking_notifier_head crypto_chain;
++extern struct srcu_notifier_head crypto_chain;
+
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -142,7 +142,7 @@
+
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+- blocking_notifier_call_chain(&crypto_chain, val, v);
++ srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ #endif /* _CRYPTO_INTERNAL_H */
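
The crypto hunks above move the notifier chain from a blocking chain (protected by an rwsem) to an SRCU chain, whose read side never blocks. The SRCU notifier API is plain mainline; a minimal usage sketch with hypothetical names:

    #include <linux/notifier.h>

    SRCU_NOTIFIER_HEAD(example_chain);          /* hypothetical chain */

    static int example_cb(struct notifier_block *nb,
                          unsigned long val, void *v)
    {
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_cb,
    };

    static void example(void)
    {
            srcu_notifier_chain_register(&example_chain, &example_nb);
            srcu_notifier_call_chain(&example_chain, 0, NULL);
            srcu_notifier_chain_unregister(&example_chain, &example_nb);
    }
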
+diff -Nur linux-3.18.8.orig/Documentation/hwlat_detector.txt linux-3.18.8/Documentation/hwlat_detector.txt
+--- linux-3.18.8.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/Documentation/hwlat_detector.txt 2015-03-03 08:05:17.000000000 +0100
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems;
++however, there is nothing x86-specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring. SMIs are instead set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the CPUs for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted; since the machine is stopped and
++interrupts are turned off, the only thing that could do that is an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
++
++Usage:
++------
++
++Loading the module hwlat_detector with the parameter "enabled=1" (or toggling
++on the "enable" entry in the "hwlat_detector" debugfs directory) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++ # modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint, referred to as "/debug/hwlat_detector" in this text. It
++is necessary to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count - number of latency spikes observed since last reset
++enable - a global enable/disable toggle (0/1), resets count
++max - maximum hardware latency actually observed (usecs)
++sample - a pipe from which to read current raw sample data
++ in the format <timestamp> <latency observed usecs>
++ (can be opened O_NONBLOCK for a single sample)
++threshold - minimum latency value to be considered (usecs)
++width - time period to sample with CPUs held (usecs)
++ must be less than the total window size (enforced)
++window - total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
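
The "sample" entry behaves like a pipe and can simply be read in a loop. A minimal user-space reader (a sketch; the /sys/kernel/debug mount point is an assumption about where debugfs lives on your system):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[256];
            ssize_t n;
            int fd = open("/sys/kernel/debug/hwlat_detector/sample", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* each read yields "<timestamp> <latency usecs>" lines */
            while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);
            }
            close(fd);
            return 0;
    }
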
+diff -Nur linux-3.18.8.orig/Documentation/sysrq.txt linux-3.18.8/Documentation/sysrq.txt
+--- linux-3.18.8.orig/Documentation/sysrq.txt 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/Documentation/sysrq.txt 2015-03-03 08:05:17.000000000 +0100
+@@ -59,10 +59,17 @@
+ On other - If you know of the key combos for other architectures, please
+ let me know so I can add them to this section.
+
+-On all - write a character to /proc/sysrq-trigger. e.g.:
+-
++On all - write a character to /proc/sysrq-trigger, e.g.:
+ echo t > /proc/sysrq-trigger
+
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++ Send an ICMP echo request with this pattern plus the particular
++ SysRq command key. Example:
++ # ping -c1 -s57 -p0102030468
++ will trigger the SysRq-H (help) command.
++
++
+ * What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b' - Will immediately reboot the system without syncing or unmounting
+diff -Nur linux-3.18.8.orig/Documentation/trace/histograms.txt linux-3.18.8/Documentation/trace/histograms.txt
+--- linux-3.18.8.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/Documentation/trace/histograms.txt 2015-03-03 08:05:17.000000000 +0100
+@@ -0,0 +1,186 @@
++ Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation of how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms:
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_INTERRUPT_OFF_LATENCY
++ CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are those actually occurring during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not be appropriate in cases where a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_WAKEUP_LATENCY
++ CONFIG_MISSED_TIMER_OFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev /sys sysfs defaults 0 0
++nodev /sys/kernel/debug debugfs defaults 0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++ cd $HISTDIR
++ for i in `find . | grep /reset$`
++ do
++ echo 1 >$i
++ done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first column and the number of samples in the second
++column. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs samples
++ 0 2984486876
++ 1 49843506
++ 2 58219047
++ 3 5348126
++ 4 2187960
++ 5 3388262
++ 6 959289
++ 7 208294
++ 8 40420
++ 9 4485
++ 10 14918
++ 11 18340
++ 12 25052
++ 13 19455
++ 14 5602
++ 15 969
++ 16 47
++ 17 18
++ 18 14
++ 19 1
++ 20 3
++ 21 2
++ 22 5
++ 23 2
++ 25 1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0, in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, the
++data of the possible culprit are therefore displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
+diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/acglobal.h linux-3.18.8/drivers/acpi/acpica/acglobal.h
+--- linux-3.18.8.orig/drivers/acpi/acpica/acglobal.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/acpi/acpica/acglobal.h 2015-03-03 08:05:17.000000000 +0100
+@@ -112,7 +112,7 @@
+ * interrupt level
+ */
+ ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
+-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
++ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */
+ ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
+
+ /* Mutex for _OSI support */
+diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/hwregs.c linux-3.18.8/drivers/acpi/acpica/hwregs.c
+--- linux-3.18.8.orig/drivers/acpi/acpica/hwregs.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/acpi/acpica/hwregs.c 2015-03-03 08:05:17.000000000 +0100
+@@ -269,14 +269,14 @@
+ ACPI_BITMASK_ALL_FIXED_STATUS,
+ ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
+
+- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+
+ /* Clear the fixed events in PM1 A/B */
+
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
+ ACPI_BITMASK_ALL_FIXED_STATUS);
+
+- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/hwxface.c linux-3.18.8/drivers/acpi/acpica/hwxface.c
+--- linux-3.18.8.orig/drivers/acpi/acpica/hwxface.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/acpi/acpica/hwxface.c 2015-03-03 08:05:17.000000000 +0100
+@@ -374,7 +374,7 @@
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
+
+ /*
+ * At this point, we know that the parent register is one of the
+@@ -435,7 +435,7 @@
+
+ unlock_and_exit:
+
+- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
+ return_ACPI_STATUS(status);
+ }
+
+diff -Nur linux-3.18.8.orig/drivers/acpi/acpica/utmutex.c linux-3.18.8/drivers/acpi/acpica/utmutex.c
+--- linux-3.18.8.orig/drivers/acpi/acpica/utmutex.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/acpi/acpica/utmutex.c 2015-03-03 08:05:17.000000000 +0100
+@@ -88,7 +88,7 @@
+ return_ACPI_STATUS (status);
+ }
+
+- status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
+ if (ACPI_FAILURE (status)) {
+ return_ACPI_STATUS (status);
+ }
+@@ -141,7 +141,7 @@
+ /* Delete the spinlocks */
+
+ acpi_os_delete_lock(acpi_gbl_gpe_lock);
+- acpi_os_delete_lock(acpi_gbl_hardware_lock);
++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
+ acpi_os_delete_lock(acpi_gbl_reference_count_lock);
+
+ /* Delete the reader/writer lock */
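
acpi_gbl_hardware_lock is converted to a raw spinlock because it is taken in contexts that must stay truly atomic on RT, where ordinary spinlock_t becomes a sleeping rtmutex. The acpi_os_create_raw_lock()/acpi_os_delete_raw_lock() helpers are introduced elsewhere in this patch; their shape is presumably along these lines (a hypothetical sketch, not the verbatim definitions):

    static inline acpi_status
    acpi_os_create_raw_lock(acpi_raw_spinlock *out_handle)
    {
            raw_spinlock_t *lock = kzalloc(sizeof(*lock), GFP_KERNEL);

            if (!lock)
                    return AE_NO_MEMORY;
            raw_spin_lock_init(lock);
            *out_handle = lock;
            return AE_OK;
    }

    static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
    {
            kfree(handle);
    }
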
+diff -Nur linux-3.18.8.orig/drivers/ata/libata-sff.c linux-3.18.8/drivers/ata/libata-sff.c
+--- linux-3.18.8.orig/drivers/ata/libata-sff.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ata/libata-sff.c 2015-03-03 08:05:17.000000000 +0100
+@@ -678,9 +678,9 @@
+ unsigned long flags;
+ unsigned int consumed;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ return consumed;
+ }
+@@ -719,7 +719,7 @@
+ unsigned long flags;
+
+ /* FIXME: use a bounce buffer */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ buf = kmap_atomic(page);
+
+ /* do the actual data transfer */
+@@ -727,7 +727,7 @@
+ do_write);
+
+ kunmap_atomic(buf);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ } else {
+ buf = page_address(page);
+ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+@@ -864,7 +864,7 @@
+ unsigned long flags;
+
+ /* FIXME: use bounce buffer */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ buf = kmap_atomic(page);
+
+ /* do the actual data transfer */
+@@ -872,7 +872,7 @@
+ count, rw);
+
+ kunmap_atomic(buf);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ } else {
+ buf = page_address(page);
+ consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+diff -Nur linux-3.18.8.orig/drivers/char/random.c linux-3.18.8/drivers/char/random.c
+--- linux-3.18.8.orig/drivers/char/random.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/char/random.c 2015-03-03 08:05:17.000000000 +0100
+@@ -776,8 +776,6 @@
+ } sample;
+ long delta, delta2, delta3;
+
+- preempt_disable();
+-
+ sample.jiffies = jiffies;
+ sample.cycles = random_get_entropy();
+ sample.num = num;
+@@ -818,7 +816,6 @@
+ */
+ credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ }
+- preempt_enable();
+ }
+
+ void add_input_randomness(unsigned int type, unsigned int code,
+@@ -871,28 +868,27 @@
+ return *(ptr + f->reg_idx++);
+ }
+
+-void add_interrupt_randomness(int irq, int irq_flags)
++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+ {
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+- struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ cycles_t cycles = random_get_entropy();
+ __u32 c_high, j_high;
+- __u64 ip;
+ unsigned long seed;
+ int credit = 0;
+
+ if (cycles == 0)
+- cycles = get_reg(fast_pool, regs);
++ cycles = get_reg(fast_pool, NULL);
+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+ fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+ fast_pool->pool[1] ^= now ^ c_high;
+- ip = regs ? instruction_pointer(regs) : _RET_IP_;
++ if (!ip)
++ ip = _RET_IP_;
+ fast_pool->pool[2] ^= ip;
+ fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+- get_reg(fast_pool, regs);
++ get_reg(fast_pool, NULL);
+
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
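
add_interrupt_randomness() can no longer rely on get_irq_regs(): with forced-threaded interrupts on RT it is not called from hard-irq context, so the instruction pointer is handed in by the caller. The caller-side adaptation lives elsewhere in this patch; its shape is roughly the following (a sketch; desc->random_ip is an assumed field this patch adds to the irq descriptor):

    struct pt_regs *regs = get_irq_regs();
    u64 ip = regs ? instruction_pointer(regs) : 0;

    #ifndef CONFIG_PREEMPT_RT_FULL
            add_interrupt_randomness(irq, flags, ip);
    #else
            /* defer: the stored ip is fed to add_interrupt_randomness()
             * later, from the threaded handler */
            desc->random_ip = ip;
    #endif
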
+diff -Nur linux-3.18.8.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.8/drivers/clocksource/tcb_clksrc.c
+--- linux-3.18.8.orig/drivers/clocksource/tcb_clksrc.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/clocksource/tcb_clksrc.c 2015-03-03 08:05:17.000000000 +0100
+@@ -23,8 +23,7 @@
+ * this 32 bit free-running counter. the second channel is not used.
+ *
+ * - The third channel may be used to provide a 16-bit clockevent
+- * source, used in either periodic or oneshot mode. This runs
+- * at 32 KiHZ, and can handle delays of up to two seconds.
++ * source, used in either periodic or oneshot mode.
+ *
+ * A boot clocksource and clockevent source are also currently needed,
+ * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+@@ -74,6 +73,7 @@
+ struct tc_clkevt_device {
+ struct clock_event_device clkevt;
+ struct clk *clk;
++ u32 freq;
+ void __iomem *regs;
+ };
+
+@@ -82,13 +82,6 @@
+ return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ }
+
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+ static u32 timer_clock;
+
+ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+@@ -111,11 +104,12 @@
+ case CLOCK_EVT_MODE_PERIODIC:
+ clk_enable(tcd->clk);
+
+- /* slow clock, count up to RC, then irq and restart */
++ /* count up to RC, then irq and restart */
+ __raw_writel(timer_clock
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++ __raw_writel((tcd->freq + HZ / 2) / HZ,
++ tcaddr + ATMEL_TC_REG(2, RC));
+
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -128,7 +122,7 @@
+ case CLOCK_EVT_MODE_ONESHOT:
+ clk_enable(tcd->clk);
+
+- /* slow clock, count up to RC, then irq and stop */
++ /* count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+@@ -157,8 +151,12 @@
+ .name = "tc_clkevt",
+ .features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT,
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ /* Should be lower than at91rm9200's system timer */
+ .rating = 125,
++#else
++ .rating = 200,
++#endif
+ .set_next_event = tc_next_event,
+ .set_mode = tc_mode,
+ },
+@@ -178,8 +176,9 @@
+ return IRQ_NONE;
+ }
+
+-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+ {
++ unsigned divisor = atmel_tc_divisors[divisor_idx];
+ int ret;
+ struct clk *t2_clk = tc->clk[2];
+ int irq = tc->irq[2];
+@@ -193,7 +192,11 @@
+ clkevt.regs = tc->regs;
+ clkevt.clk = t2_clk;
+
+- timer_clock = clk32k_divisor_idx;
++ timer_clock = divisor_idx;
++ if (!divisor)
++ clkevt.freq = 32768;
++ else
++ clkevt.freq = clk_get_rate(t2_clk) / divisor;
+
+ clkevt.clkevt.cpumask = cpumask_of(0);
+
+@@ -203,7 +206,7 @@
+ return ret;
+ }
+
+- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
+
+ return ret;
+ }
+@@ -340,7 +343,11 @@
+ goto err_disable_t1;
+
+ /* channel 2: periodic and oneshot timer support */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ ret = setup_clkevents(tc, clk32k_divisor_idx);
++#else
++ ret = setup_clkevents(tc, best_divisor_idx);
++#endif
+ if (ret)
+ goto err_unregister_clksrc;
+
+diff -Nur linux-3.18.8.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.8/drivers/clocksource/timer-atmel-pit.c
+--- linux-3.18.8.orig/drivers/clocksource/timer-atmel-pit.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/clocksource/timer-atmel-pit.c 2015-03-03 08:05:17.000000000 +0100
+@@ -90,6 +90,7 @@
+ return elapsed;
+ }
+
++static struct irqaction at91sam926x_pit_irq;
+ /*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+@@ -100,6 +101,8 @@
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ /* Set up irq handler */
++ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
+ /* update clocksource counter */
+ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+ pit_write(data->base, AT91_PIT_MR,
+@@ -113,6 +116,7 @@
+ /* disable irq, leaving the clocksource active */
+ pit_write(data->base, AT91_PIT_MR,
+ (data->cycle - 1) | AT91_PIT_PITEN);
++ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+diff -Nur linux-3.18.8.orig/drivers/gpio/gpio-omap.c linux-3.18.8/drivers/gpio/gpio-omap.c
+--- linux-3.18.8.orig/drivers/gpio/gpio-omap.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/gpio/gpio-omap.c 2015-03-03 08:05:17.000000000 +0100
+@@ -57,7 +57,7 @@
+ u32 saved_datain;
+ u32 level_mask;
+ u32 toggle_mask;
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct gpio_chip chip;
+ struct clk *dbck;
+ u32 mod_usage;
+@@ -503,19 +503,19 @@
+ (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
+ return -EINVAL;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ offset = GPIO_INDEX(bank, gpio);
+ retval = omap_set_gpio_triggering(bank, offset, type);
+ if (!LINE_USED(bank->mod_usage, offset)) {
+ omap_enable_gpio_module(bank, offset);
+ omap_set_gpio_direction(bank, offset, 1);
+ } else if (!omap_gpio_is_input(bank, BIT(offset))) {
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ return -EINVAL;
+ }
+
+ bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+@@ -633,14 +633,14 @@
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->context.wake_en |= gpio_bit;
+ else
+ bank->context.wake_en &= ~gpio_bit;
+
+ writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
+@@ -675,7 +675,7 @@
+ if (!BANK_USED(bank))
+ pm_runtime_get_sync(bank->dev);
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ /* Set trigger to none. You need to enable the desired trigger with
+ * request_irq() or set_irq_type(). Only do this if the IRQ line has
+ * not already been requested.
+@@ -685,7 +685,7 @@
+ omap_enable_gpio_module(bank, offset);
+ }
+ bank->mod_usage |= BIT(offset);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
+@@ -695,11 +695,11 @@
+ struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
+ unsigned long flags;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->mod_usage &= ~(BIT(offset));
+ omap_disable_gpio_module(bank, offset);
+ omap_reset_gpio(bank, bank->chip.base + offset);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ /*
+ * If this is the last gpio to be freed in the bank,
+@@ -799,12 +799,12 @@
+ unsigned long flags;
+ unsigned offset = GPIO_INDEX(bank, gpio);
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ gpio_unlock_as_irq(&bank->chip, offset);
+ bank->irq_usage &= ~(BIT(offset));
+ omap_disable_gpio_module(bank, offset);
+ omap_reset_gpio(bank, gpio);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ /*
+ * If this is the last IRQ to be freed in the bank,
+@@ -828,10 +828,10 @@
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+ unsigned long flags;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_set_gpio_irqenable(bank, gpio, 0);
+ omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+ static void omap_gpio_unmask_irq(struct irq_data *d)
+@@ -842,7 +842,7 @@
+ u32 trigger = irqd_get_trigger_type(d);
+ unsigned long flags;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ if (trigger)
+ omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
+
+@@ -854,7 +854,7 @@
+ }
+
+ omap_set_gpio_irqenable(bank, gpio, 1);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+ /*---------------------------------------------------------------------*/
+@@ -867,9 +867,9 @@
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ unsigned long flags;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
+@@ -882,9 +882,9 @@
+ OMAP_MPUIO_GPIO_MASKIT / bank->stride;
+ unsigned long flags;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ writel_relaxed(bank->context.wake_en, mask_reg);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
+@@ -930,9 +930,9 @@
+
+ bank = container_of(chip, struct gpio_bank, chip);
+ reg = bank->base + bank->regs->direction;
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ dir = !!(readl_relaxed(reg) & BIT(offset));
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ return dir;
+ }
+
+@@ -942,9 +942,9 @@
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_set_gpio_direction(bank, offset, 1);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
+
+@@ -968,10 +968,10 @@
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->set_dataout(bank, offset, value);
+ omap_set_gpio_direction(bank, offset, 0);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
+
+@@ -983,9 +983,9 @@
+
+ bank = container_of(chip, struct gpio_bank, chip);
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap2_set_gpio_debounce(bank, offset, debounce);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
+@@ -996,9 +996,9 @@
+ unsigned long flags;
+
+ bank = container_of(chip, struct gpio_bank, chip);
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->set_dataout(bank, offset, value);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+ /*---------------------------------------------------------------------*/
+@@ -1223,7 +1223,7 @@
+ else
+ bank->set_dataout = omap_set_gpio_dataout_mask;
+
+- spin_lock_init(&bank->lock);
++ raw_spin_lock_init(&bank->lock);
+
+ /* Static mapping, never released */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+@@ -1270,7 +1270,7 @@
+ unsigned long flags;
+ u32 wake_low, wake_hi;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+
+ /*
+ * Only edges can generate a wakeup event to the PRCM.
+@@ -1323,7 +1323,7 @@
+ bank->get_context_loss_count(bank->dev);
+
+ omap_gpio_dbck_disable(bank);
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
+@@ -1338,7 +1338,7 @@
+ unsigned long flags;
+ int c;
+
+- spin_lock_irqsave(&bank->lock, flags);
++ raw_spin_lock_irqsave(&bank->lock, flags);
+
+ /*
+ * On the first resume during the probe, the context has not
+@@ -1374,14 +1374,14 @@
+ if (c != bank->context_loss_count) {
+ omap_gpio_restore_context(bank);
+ } else {
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
+ }
+ }
+
+ if (!bank->workaround_enabled) {
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ return 0;
+ }
+
+@@ -1436,7 +1436,7 @@
+ }
+
+ bank->workaround_enabled = false;
+- spin_unlock_irqrestore(&bank->lock, flags);
++ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+ }
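
The gpio-omap change is a mechanical spinlock_t to raw_spinlock_t conversion: the bank lock is taken from irq-chip callbacks, which still run with interrupts hard-disabled on RT, so it must remain a spinning lock rather than turn into an rtmutex. The general shape of such a conversion:

    #include <linux/spinlock.h>

    struct bank {
            raw_spinlock_t lock;            /* was: spinlock_t lock; */
    };

    static void bank_init(struct bank *b)
    {
            raw_spin_lock_init(&b->lock);   /* was: spin_lock_init() */
    }

    static void bank_touch(struct bank *b)
    {
            unsigned long flags;

            /* stays a real spinning, irq-disabling lock on RT */
            raw_spin_lock_irqsave(&b->lock, flags);
            /* ... short, non-sleeping critical section ... */
            raw_spin_unlock_irqrestore(&b->lock, flags);
    }
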
+diff -Nur linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.8/drivers/gpu/drm/i915/i915_gem.c
+--- linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/gpu/drm/i915/i915_gem.c 2015-03-03 08:05:17.000000000 +0100
+@@ -5144,7 +5144,7 @@
+ if (!mutex_is_locked(mutex))
+ return false;
+
+-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_PREEMPT_RT_BASE)
+ return mutex->owner == task;
+ #else
+ /* Since UP may be pre-empted, we cannot assume that we own the lock */
+diff -Nur linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+--- linux-3.18.8.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-03-03 08:05:17.000000000 +0100
+@@ -1170,7 +1170,9 @@
+ return ret;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
++#endif
+
+ i915_gem_execbuffer_move_to_active(vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+diff -Nur linux-3.18.8.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.8/drivers/i2c/busses/i2c-omap.c
+--- linux-3.18.8.orig/drivers/i2c/busses/i2c-omap.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/i2c/busses/i2c-omap.c 2015-03-03 08:05:17.000000000 +0100
+@@ -875,15 +875,12 @@
+ u16 mask;
+ u16 stat;
+
+- spin_lock(&dev->lock);
+- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
+ stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
++ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
+
+ if (stat & mask)
+ ret = IRQ_WAKE_THREAD;
+
+- spin_unlock(&dev->lock);
+-
+ return ret;
+ }
+
+diff -Nur linux-3.18.8.orig/drivers/ide/alim15x3.c linux-3.18.8/drivers/ide/alim15x3.c
+--- linux-3.18.8.orig/drivers/ide/alim15x3.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/alim15x3.c 2015-03-03 08:05:17.000000000 +0100
+@@ -234,7 +234,7 @@
+
+ isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ if (m5229_revision < 0xC2) {
+ /*
+@@ -325,7 +325,7 @@
+ }
+ pci_dev_put(north);
+ pci_dev_put(isa_dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return 0;
+ }
+
+diff -Nur linux-3.18.8.orig/drivers/ide/hpt366.c linux-3.18.8/drivers/ide/hpt366.c
+--- linux-3.18.8.orig/drivers/ide/hpt366.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/hpt366.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1241,7 +1241,7 @@
+
+ dma_old = inb(base + 2);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ dma_new = dma_old;
+ pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
+@@ -1252,7 +1252,7 @@
+ if (dma_new != dma_old)
+ outb(dma_new, base + 2);
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+diff -Nur linux-3.18.8.orig/drivers/ide/ide-io.c linux-3.18.8/drivers/ide/ide-io.c
+--- linux-3.18.8.orig/drivers/ide/ide-io.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/ide-io.c 2015-03-03 08:05:18.000000000 +0100
+@@ -659,7 +659,7 @@
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+- local_irq_disable();
++ local_irq_disable_nort();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+diff -Nur linux-3.18.8.orig/drivers/ide/ide-iops.c linux-3.18.8/drivers/ide/ide-iops.c
+--- linux-3.18.8.orig/drivers/ide/ide-iops.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/ide-iops.c 2015-03-03 08:05:18.000000000 +0100
+@@ -129,12 +129,12 @@
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+diff -Nur linux-3.18.8.orig/drivers/ide/ide-io-std.c linux-3.18.8/drivers/ide/ide-io-std.c
+--- linux-3.18.8.orig/drivers/ide/ide-io-std.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/ide-io-std.c 2015-03-03 08:05:18.000000000 +0100
+@@ -175,7 +175,7 @@
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+@@ -186,7 +186,7 @@
+ insl(data_addr, buf, words);
+
+ if ((io_32bit & 2) && !mmio)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ if (((len + 1) & 3) < 2)
+ return;
+@@ -219,7 +219,7 @@
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+@@ -230,7 +230,7 @@
+ outsl(data_addr, buf, words);
+
+ if ((io_32bit & 2) && !mmio)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ if (((len + 1) & 3) < 2)
+ return;
+diff -Nur linux-3.18.8.orig/drivers/ide/ide-probe.c linux-3.18.8/drivers/ide/ide-probe.c
+--- linux-3.18.8.orig/drivers/ide/ide-probe.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/ide-probe.c 2015-03-03 08:05:18.000000000 +0100
+@@ -196,10 +196,10 @@
+ int bswap = 1;
+
+ /* local CPU only; some systems need this */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ /* read 512 bytes of id info */
+ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ drive->dev_flags |= IDE_DFLAG_ID_READ;
+ #ifdef DEBUG
+diff -Nur linux-3.18.8.orig/drivers/ide/ide-taskfile.c linux-3.18.8/drivers/ide/ide-taskfile.c
+--- linux-3.18.8.orig/drivers/ide/ide-taskfile.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/ide/ide-taskfile.c 2015-03-03 08:05:18.000000000 +0100
+@@ -250,7 +250,7 @@
+
+ page_is_high = PageHighMem(page);
+ if (page_is_high)
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ buf = kmap_atomic(page) + offset;
+
+@@ -271,7 +271,7 @@
+ kunmap_atomic(buf);
+
+ if (page_is_high)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ len -= nr_bytes;
+ }
+@@ -414,7 +414,7 @@
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+- local_irq_disable();
++ local_irq_disable_nort();
+
+ ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
+
+diff -Nur linux-3.18.8.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.8/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+--- linux-3.18.8.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-03-03 08:05:18.000000000 +0100
+@@ -796,7 +796,7 @@
+
+ ipoib_mcast_stop_thread(dev, 0);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ netif_addr_lock(dev);
+ spin_lock(&priv->lock);
+
+@@ -878,7 +878,7 @@
+
+ spin_unlock(&priv->lock);
+ netif_addr_unlock(dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ /* We have to cancel outside of the spinlock */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
+diff -Nur linux-3.18.8.orig/drivers/input/gameport/gameport.c linux-3.18.8/drivers/input/gameport/gameport.c
+--- linux-3.18.8.orig/drivers/input/gameport/gameport.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/input/gameport/gameport.c 2015-03-03 08:05:18.000000000 +0100
+@@ -124,12 +124,12 @@
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ GET_TIME(t1);
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ GET_TIME(t2);
+ GET_TIME(t3);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
+ }
+@@ -148,11 +148,11 @@
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ rdtscl(t1);
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ rdtscl(t2);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if (t2 - t1 < tx) tx = t2 - t1;
+ }
+diff -Nur linux-3.18.8.orig/drivers/leds/trigger/Kconfig linux-3.18.8/drivers/leds/trigger/Kconfig
+--- linux-3.18.8.orig/drivers/leds/trigger/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/leds/trigger/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -61,7 +61,7 @@
+
+ config LEDS_TRIGGER_CPU
+ bool "LED CPU Trigger"
+- depends on LEDS_TRIGGERS
++ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
+ help
+ This allows LEDs to be controlled by active CPUs. This shows
+ the active CPUs across an array of LEDs so you can see which
+diff -Nur linux-3.18.8.orig/drivers/md/bcache/Kconfig linux-3.18.8/drivers/md/bcache/Kconfig
+--- linux-3.18.8.orig/drivers/md/bcache/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/md/bcache/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -1,6 +1,7 @@
+
+ config BCACHE
+ tristate "Block device as cache"
++ depends on !PREEMPT_RT_FULL
+ ---help---
+ Allows a block device to be used as cache for other devices; uses
+ a btree for indexing and the layout is optimized for SSDs.
+diff -Nur linux-3.18.8.orig/drivers/md/dm.c linux-3.18.8/drivers/md/dm.c
+--- linux-3.18.8.orig/drivers/md/dm.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/md/dm.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1898,14 +1898,14 @@
+ if (map_request(ti, clone, md))
+ goto requeued;
+
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ spin_lock(q->queue_lock);
+ }
+
+ goto out;
+
+ requeued:
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ spin_lock(q->queue_lock);
+
+ delay_and_out:
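
BUG_ON_NONRT() asserts only on non-RT kernels: with PREEMPT_RT the queue lock is an rtmutex and no longer disables interrupts, so irqs_disabled() would legitimately be false here. A sketch of the helpers added elsewhere in this patch:

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define BUG_ON_RT(c)      BUG_ON(c)
    # define BUG_ON_NONRT(c)   do { } while (0)
    #else
    # define BUG_ON_RT(c)      do { } while (0)
    # define BUG_ON_NONRT(c)   BUG_ON(c)
    #endif
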
+diff -Nur linux-3.18.8.orig/drivers/md/raid5.c linux-3.18.8/drivers/md/raid5.c
+--- linux-3.18.8.orig/drivers/md/raid5.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/md/raid5.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1649,8 +1649,9 @@
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ percpu = per_cpu_ptr(conf->percpu, cpu);
++ spin_lock(&percpu->lock);
+ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ ops_run_biofill(sh);
+ overlap_clear++;
+@@ -1702,7 +1703,8 @@
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&sh->raid_conf->wait_for_overlap);
+ }
+- put_cpu();
++ spin_unlock(&percpu->lock);
++ put_cpu_light();
+ }
+
+ static int grow_one_stripe(struct r5conf *conf, int hash)
+@@ -5707,6 +5709,7 @@
+ __func__, cpu);
+ break;
+ }
++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ }
+ put_online_cpus();
+
+diff -Nur linux-3.18.8.orig/drivers/md/raid5.h linux-3.18.8/drivers/md/raid5.h
+--- linux-3.18.8.orig/drivers/md/raid5.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/md/raid5.h 2015-03-03 08:05:18.000000000 +0100
+@@ -457,6 +457,7 @@
+ int recovery_disabled;
+ /* per cpu variables */
+ struct raid5_percpu {
++ spinlock_t lock; /* Protection for -RT */
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
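
Because get_cpu_light() leaves the section preemptible, two tasks on the same CPU could otherwise enter raid_run_ops() concurrently and clobber the shared per-CPU scribble buffers; the new per-CPU spinlock serializes them. The access pattern the raid5 hunks establish:

    cpu = get_cpu_light();                  /* pin to CPU, stay preemptible */
    percpu = per_cpu_ptr(conf->percpu, cpu);
    spin_lock(&percpu->lock);               /* fend off same-CPU preemptors */
    /* ... use percpu->spare_page / percpu->scribble ... */
    spin_unlock(&percpu->lock);
    put_cpu_light();
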
+diff -Nur linux-3.18.8.orig/drivers/misc/hwlat_detector.c linux-3.18.8/drivers/misc/hwlat_detector.c
+--- linux-3.18.8.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/drivers/misc/hwlat_detector.c 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,1240 @@
++/*
++ * hwlat_detector.c - A simple Hardware Latency detector.
++ *
++ * Use this module to detect large system latencies induced by the behavior of
++ * certain underlying system hardware or firmware, independent of Linux itself.
++ * The code was developed originally to detect the presence of SMIs on Intel
++ * and AMD systems, although there is no dependency upon x86 herein.
++ *
++ * The classical example usage of this module is in detecting the presence of
++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
++ * somewhat special form of hardware interrupt spawned from earlier CPU debug
++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
++ * LPC (or other device) to generate a special interrupt under certain
++ * circumstances, for example, upon expiration of a special SMI timer device,
++ * due to certain external thermal readings, on certain I/O address accesses,
++ * and other situations. An SMI hits a special CPU pin, triggers a special
++ * SMI mode (complete with special memory map), and the OS is unaware.
++ *
++ * Although certain hardware-induced latencies are necessary (for example,
++ * a modern system often requires an SMI handler for correct thermal control
++ * and remote management) they can wreak havoc upon any OS-level performance
++ * guarantees toward low-latency, especially when the OS is not even made
++ * aware of the presence of these interrupts. For this reason, we need a
++ * somewhat brute force mechanism to detect these interrupts. In this case,
++ * we do it by hogging all of the CPU(s) for configurable timer intervals,
++ * sampling the built-in CPU timer, looking for discontiguous readings.
++ *
++ * WARNING: This implementation necessarily introduces latencies. Therefore,
++ * you should NEVER use this module in a production environment
++ * requiring any kind of low-latency performance guarantee(s).
++ *
++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
++ *
++ * Includes useful feedback from Clark Williams <clark@redhat.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ring_buffer.h>
++#include <linux/time.h>
++#include <linux/hrtimer.h>
++#include <linux/kthread.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include <linux/version.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/trace_clock.h>
++
++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
++#define U64STR_SIZE 22 /* 20 digits max */
++
++#define VERSION "1.0.0"
++#define BANNER "hwlat_detector: "
++#define DRVNAME "hwlat_detector"
++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
++
++/* Module metadata */
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
++MODULE_DESCRIPTION("A simple hardware latency detector");
++MODULE_VERSION(VERSION);
++
++/* Module parameters */
++
++static int debug;
++static int enabled;
++static int threshold;
++
++module_param(debug, int, 0); /* enable debug */
++module_param(enabled, int, 0); /* enable detector */
++module_param(threshold, int, 0); /* latency threshold */
++
++/* Buffering and sampling */
++
++static struct ring_buffer *ring_buffer; /* sample buffer */
++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
++static unsigned long buf_size = BUF_SIZE_DEFAULT;
++static struct task_struct *kthread; /* sampling thread */
++
++/* DebugFS filesystem entries */
++
++static struct dentry *debug_dir; /* debugfs directory */
++static struct dentry *debug_max; /* maximum TSC delta */
++static struct dentry *debug_count; /* total detect count */
++static struct dentry *debug_sample_width; /* sample width us */
++static struct dentry *debug_sample_window; /* sample window us */
++static struct dentry *debug_sample; /* raw samples us */
++static struct dentry *debug_threshold; /* threshold us */
++static struct dentry *debug_enable; /* enable/disable */
++
++/* Individual samples and global state */
++
++struct sample; /* latency sample */
++struct data; /* Global state */
++
++/* Sampling functions */
++static int __buffer_add_sample(struct sample *sample);
++static struct sample *buffer_get_sample(struct sample *sample);
++
++/* Threading and state */
++static int kthread_fn(void *unused);
++static int start_kthread(void);
++static int stop_kthread(void);
++static void __reset_stats(void);
++static int init_stats(void);
++
++/* Debugfs interface */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry);
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry);
++static int debug_sample_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static int debug_sample_release(struct inode *inode, struct file *filp);
++static int debug_enable_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static ssize_t debug_enable_fwrite(struct file *file,
++ const char __user *user_buffer,
++ size_t user_size, loff_t *offset);
++
++/* Initialization functions */
++static int init_debugfs(void);
++static void free_debugfs(void);
++static int detector_init(void);
++static void detector_exit(void);
++
++/* Individual latency samples are stored here when detected and packed into
++ * the ring_buffer circular buffer, where they are overwritten when
++ * more than buf_size/sizeof(sample) samples are received. */
++struct sample {
++ u64 seqnum; /* unique sequence */
++ u64 duration; /* ktime delta */
++ u64 outer_duration; /* ktime delta (outer loop) */
++ struct timespec timestamp; /* wall time */
++ unsigned long lost;
++};
++
++/* keep the global state somewhere. */
++static struct data {
++
++ struct mutex lock; /* protect changes */
++
++ u64 count; /* total since reset */
++ u64 max_sample; /* max hardware latency */
++ u64 threshold; /* sample threshold level */
++
++ u64 sample_window; /* total sampling window (on+off) */
++ u64 sample_width; /* active sampling portion of window */
++
++ atomic_t sample_open; /* whether the sample file is open */
++
++ wait_queue_head_t wq; /* waitqueue for new sample values */
++
++} data;
++
++/**
++ * __buffer_add_sample - add a new latency sample recording to the ring buffer
++ * @sample: The new latency sample value
++ *
++ * This receives a new latency sample and records it in a global ring buffer.
++ * No additional locking is used in this case.
++ */
++static int __buffer_add_sample(struct sample *sample)
++{
++ return ring_buffer_write(ring_buffer,
++ sizeof(struct sample), sample);
++}
++
++/**
++ * buffer_get_sample - remove a hardware latency sample from the ring buffer
++ * @sample: Pre-allocated storage for the sample
++ *
++ * This retrieves a hardware latency sample from the global circular buffer
++ */
++static struct sample *buffer_get_sample(struct sample *sample)
++{
++ struct ring_buffer_event *e = NULL;
++ struct sample *s = NULL;
++ unsigned int cpu = 0;
++
++ if (!sample)
++ return NULL;
++
++ mutex_lock(&ring_buffer_mutex);
++ for_each_online_cpu(cpu) {
++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
++ if (e)
++ break;
++ }
++
++ if (e) {
++ s = ring_buffer_event_data(e);
++ memcpy(sample, s, sizeof(struct sample));
++ } else
++ sample = NULL;
++ mutex_unlock(&ring_buffer_mutex);
++
++ return sample;
++}
++
++#ifndef CONFIG_TRACING
++#define time_type ktime_t
++#define time_get() ktime_get()
++#define time_to_us(x) ktime_to_us(x)
++#define time_sub(a, b) ktime_sub(a, b)
++#define init_time(a, b) (a).tv64 = b
++#define time_u64(a) ((a).tv64)
++#else
++#define time_type u64
++#define time_get() trace_clock_local()
++#define time_to_us(x) div_u64(x, 1000)
++#define time_sub(a, b) ((a) - (b))
++#define init_time(a, b) (a = b)
++#define time_u64(a) a
++#endif
++/**
++ * get_sample - sample the CPU TSC and look for likely hardware latencies
++ *
++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
++ * hardware-induced latency. Called with interrupts disabled and with
++ * data.lock held.
++ */
++static int get_sample(void)
++{
++ time_type start, t1, t2, last_t2;
++ s64 diff, total = 0;
++ u64 sample = 0;
++ u64 outer_sample = 0;
++ int ret = -1;
++
++ init_time(last_t2, 0);
++ start = time_get(); /* start timestamp */
++
++ do {
++
++ t1 = time_get(); /* we'll look for a discontinuity */
++ t2 = time_get();
++
++ if (time_u64(last_t2)) {
++ /* Check the delta from outer loop (t2 to next t1) */
++ diff = time_to_us(time_sub(t1, last_t2));
++ /* This shouldn't happen */
++ if (diff < 0) {
++ pr_err(BANNER "time running backwards\n");
++ goto out;
++ }
++ if (diff > outer_sample)
++ outer_sample = diff;
++ }
++ last_t2 = t2;
++
++ total = time_to_us(time_sub(t2, start)); /* sample width */
++
++ /* This checks the inner loop (t1 to t2) */
++ diff = time_to_us(time_sub(t2, t1)); /* current diff */
++
++ /* This shouldn't happen */
++ if (diff < 0) {
++ pr_err(BANNER "time running backwards\n");
++ goto out;
++ }
++
++ if (diff > sample)
++ sample = diff; /* only want highest value */
++
++ } while (total <= data.sample_width);
++
++ ret = 0;
++
++ /* If we exceed the threshold value, we have found a hardware latency */
++ if (sample > data.threshold || outer_sample > data.threshold) {
++ struct sample s;
++
++ ret = 1;
++
++ data.count++;
++ s.seqnum = data.count;
++ s.duration = sample;
++ s.outer_duration = outer_sample;
++ s.timestamp = CURRENT_TIME;
++ __buffer_add_sample(&s);
++
++ /* Keep a running maximum ever recorded hardware latency */
++ if (sample > data.max_sample)
++ data.max_sample = sample;
++ }
++
++out:
++ return ret;
++}
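++
++/*
++ * Worked example (illustrative numbers, not produced by the code): with
++ * data.threshold = 10 (us), an inner pair reading t1 = 100us and
++ * t2 = 350us gives diff = 250us; since 250 > 10, get_sample() returns 1
++ * and a struct sample with duration = 250 lands in the ring buffer.
++ */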
++
++/*
++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
++ * @unused: A required part of the kthread API.
++ *
++ * Used to periodically sample the CPU TSC via a call to get_sample. We
++ * disable interrupts, which does (intentionally) introduce latency since we
++ * need to ensure nothing else might be running (and thus pre-empting).
++ * Obviously this should never be used in production environments.
++ *
++ * Currently this runs on whichever CPU it was scheduled on, but most
++ * real-world hardware latency situations occur across several CPUs. We
++ * might later generalize this if we find there are any actual systems
++ * with alternate SMI delivery or other hardware latencies.
++ */
++static int kthread_fn(void *unused)
++{
++ int ret;
++ u64 interval;
++
++ while (!kthread_should_stop()) {
++
++ mutex_lock(&data.lock);
++
++ local_irq_disable();
++ ret = get_sample();
++ local_irq_enable();
++
++ if (ret > 0)
++ wake_up(&data.wq); /* wake up reader(s) */
++
++ interval = data.sample_window - data.sample_width;
++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
++
++ mutex_unlock(&data.lock);
++
++ if (msleep_interruptible(interval))
++ break;
++ }
++
++ return 0;
++}
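++
++/*
++ * Interval sketch using the documented defaults (a 1s window sampled for
++ * 0.5s, per the Kconfig help below): interval = (1000000 - 500000) us,
++ * then do_div(interval, USEC_PER_MSEC) leaves 500, i.e. a 500ms sleep
++ * between sampling bursts.
++ */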
++
++/**
++ * start_kthread - Kick off the hardware latency sampling/detector kthread
++ *
++ * This starts a kernel thread that will sit and sample the CPU timestamp
++ * counter (TSC or similar) and look for potential hardware latencies.
++ */
++static int start_kthread(void)
++{
++ kthread = kthread_run(kthread_fn, NULL,
++ DRVNAME);
++ if (IS_ERR(kthread)) {
++ pr_err(BANNER "could not start sampling thread\n");
++ enabled = 0;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++/**
++ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
++ *
++ * This kicks the running hardware latency sampling/detector kernel thread and
++ * tells it to stop sampling now. Use this on unload and at system shutdown.
++ */
++static int stop_kthread(void)
++{
++ int ret;
++
++ ret = kthread_stop(kthread);
++
++ return ret;
++}
++
++/**
++ * __reset_stats - Reset statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We call this
++ * function in order to reset those when "enable" is toggled on or off, and
++ * also at initialization. Should be called with data.lock held.
++ */
++static void __reset_stats(void)
++{
++ data.count = 0;
++ data.max_sample = 0;
++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
++}
++
++/**
++ * init_stats - Setup global state statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We also use
++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
++ * induced system latencies. This function initializes these structures and
++ * allocates the global ring buffer also.
++ */
++static int init_stats(void)
++{
++ int ret = -ENOMEM;
++
++ mutex_init(&data.lock);
++ init_waitqueue_head(&data.wq);
++ atomic_set(&data.sample_open, 0);
++
++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
++
++ if (WARN(!ring_buffer, KERN_ERR BANNER
++ "failed to allocate ring buffer!\n"))
++ goto out;
++
++ __reset_stats();
++ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
++
++ ret = 0;
++
++out:
++ return ret;
++
++}
++
++/*
++ * simple_data_read - Wrapper read function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ * @entry: The entry to read from
++ *
++ * This function provides a generic read implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_read directly, but we need to make sure that the data.lock
++ * is held during the actual read.
++ */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry)
++{
++ char buf[U64STR_SIZE];
++ u64 val = 0;
++ int len = 0;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (!entry)
++ return -EFAULT;
++
++ mutex_lock(&data.lock);
++ val = *entry;
++ mutex_unlock(&data.lock);
++
++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
++
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
++
++}
++
++/*
++ * simple_data_write - Wrapper write function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to write value from
++ * @cnt: The maximum number of bytes to write
++ * @ppos: The current "file" position
++ * @entry: The entry to write to
++ *
++ * This function provides a generic write implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_write directly, but we need to make sure that the data.lock
++ * is held during the actual write.
++ */
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ *entry = val;
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
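++
++/*
++ * Wiring sketch: each per-file handler below is a one-line delegation to
++ * these wrappers, e.g. debug_count_fread() calls
++ * simple_data_read(..., &data.count), so the data.lock discipline lives
++ * in exactly one place.
++ */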
++
++/**
++ * debug_count_fopen - Open function for "count" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "count" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_count_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_count_fread - Read function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to read the
++ * number of latency readings exceeding the configured threshold since
++ * the detector was last reset (e.g. by writing a zero into "count").
++ */
++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_count_fwrite - Write function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to write a
++ * desired value, especially to zero the total count.
++ */
++static ssize_t debug_count_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "enable" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_enable_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_enable_fread - Read function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * whether the detector is currently enabled ("0\n" or "1\n" returned).
++ */
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[4];
++
++ if ((cnt < sizeof(buf)) || (*ppos))
++ return 0;
++
++ buf[0] = enabled ? '1' : '0';
++ buf[1] = '\n';
++ buf[2] = '\0';
++ if (copy_to_user(ubuf, buf, strlen(buf)))
++ return -EFAULT;
++ return *ppos = strlen(buf);
++}
++
++/**
++ * debug_enable_fwrite - Write function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to enable or
++ * disable the detector, which will have the side-effect of possibly
++ * also resetting the global stats and kicking off the measuring
++ * kthread (on an enable) or the converse (upon a disable).
++ */
++static ssize_t debug_enable_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[4];
++ int csize = min(cnt, sizeof(buf));
++ long val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[sizeof(buf)-1] = '\0'; /* just in case */
++ err = kstrtoul(buf, 10, &val);
++ if (0 != err)
++ return -EINVAL;
++
++ if (val) {
++ if (enabled)
++ goto unlock;
++ enabled = 1;
++ __reset_stats();
++ if (start_kthread())
++ return -EFAULT;
++ } else {
++ if (!enabled)
++ goto unlock;
++ enabled = 0;
++ err = stop_kthread();
++ if (err) {
++ pr_err(BANNER "cannot stop kthread\n");
++ return -EFAULT;
++ }
++ wake_up(&data.wq); /* reader(s) should return */
++ }
++unlock:
++ return csize;
++}
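++
++/*
++ * Usage sketch (hypothetical shell session; assumes debugfs is mounted
++ * at /sys/kernel/debug):
++ *   # cat /sys/kernel/debug/hwlat_detector/enable
++ *   0
++ *   # echo 1 > /sys/kernel/debug/hwlat_detector/enable  # starts the kthread
++ */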
++
++/**
++ * debug_max_fopen - Open function for "max" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "max" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_max_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_max_fread - Read function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * the maximum latency value observed since it was last reset.
++ */
++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++/**
++ * debug_max_fwrite - Write function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to reset the
++ * maximum or set it to some other desired value; if subsequent
++ * measurements then exceed this value, the maximum will be updated.
++ */
++static ssize_t debug_max_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++
++/**
++ * debug_sample_fopen - An open function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of this debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function handles opening the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. It can be opened blocking or non-blocking,
++ * which determines whether reads block for new samples or return
++ * immediately when none are available.
++ * Implements simple locking to prevent multiple simultaneous use.
++ */
++static int debug_sample_fopen(struct inode *inode, struct file *filp)
++{
++ if (!atomic_add_unless(&data.sample_open, 1, 1))
++ return -EBUSY;
++ else
++ return 0;
++}
++
++/**
++ * debug_sample_fread - A read function for "sample" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that will contain the samples read
++ * @cnt: The maximum bytes to read from the debugfs "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function handles reading from the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. By default this will block pending a new
++ * value written into the sample buffer, unless one or more values are
++ * already waiting in the buffer, or the sample file was previously
++ * opened in a non-blocking mode of operation.
++ */
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ int len = 0;
++ char buf[64];
++ struct sample *sample = NULL;
++
++ if (!enabled)
++ return 0;
++
++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
++ if (!sample)
++ return -ENOMEM;
++
++ while (!buffer_get_sample(sample)) {
++
++ DEFINE_WAIT(wait);
++
++ if (filp->f_flags & O_NONBLOCK) {
++ len = -EAGAIN;
++ goto out;
++ }
++
++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&data.wq, &wait);
++
++ if (signal_pending(current)) {
++ len = -EINTR;
++ goto out;
++ }
++
++ if (!enabled) { /* enable was toggled */
++ len = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
++ sample->timestamp.tv_sec,
++ sample->timestamp.tv_nsec,
++ sample->duration,
++ sample->outer_duration);
++
++
++ /* handling partial reads is more trouble than it's worth */
++ if (len > cnt)
++ goto out;
++
++ if (copy_to_user(ubuf, buf, len))
++ len = -EFAULT;
++
++out:
++ kfree(sample);
++ return len;
++}
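++
++/*
++ * Example output line (illustrative values; the format is the snprintf
++ * above, i.e. zero-padded seconds.nanoseconds, then inner and outer
++ * durations in us):
++ *   1425369158.0000312480	18	2
++ */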
++
++/**
++ * debug_sample_release - Release function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function completes the close of the debugfs interface "sample" file.
++ * Frees the sample_open "lock" so that other users may open the interface.
++ */
++static int debug_sample_release(struct inode *inode, struct file *filp)
++{
++ atomic_dec(&data.sample_open);
++
++ return 0;
++}
++
++/**
++ * debug_threshold_fopen - Open function for "threshold" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "threshold" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_threshold_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_threshold_fread - Read function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * the current threshold level at which a latency will be recorded in the
++ * global ring buffer, typically on the order of 10us.
++ */
++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
++}
++
++/**
++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * the threshold level at which any subsequently detected latencies will
++ * be recorded into the global ring buffer.
++ */
++static ssize_t debug_threshold_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ int ret;
++
++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return ret;
++}
++
++/**
++ * debug_width_fopen - Open function for "width" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "width" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_width_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_width_fread - Read function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly without impairing the system's ability to respond
++ * to a sample reader or, worse, making the system appear to have gone
++ * out to lunch.
++ */
++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
++}
++
++/**
++ * debug_width_fwrite - Write function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly without impairing the system's ability to respond
++ * to a sample reader or, worse, making the system appear to have gone
++ * out to lunch. It is enforced that the width is less than the total
++ * window size.
++ */
++static ssize_t debug_width_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (0 != err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (val < data.sample_window)
++ data.sample_width = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return csize;
++}
++
++/**
++ * debug_window_fopen - Open function for "window" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs.
++ */
++static int debug_window_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_window_fread - Read function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to read the total window size.
++ */
++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
++}
++
++/**
++ * debug_window_fwrite - Write function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to write a new total window size. It
++ * is enforced that any value written must be greater than the sample width
++ * size, or an error results.
++ */
++static ssize_t debug_window_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (0 != err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (data.sample_width < val)
++ data.sample_window = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
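++
++/*
++ * Consistency sketch for the two writers above: with window = 1000000
++ * (1s), writing 500000 to "width" succeeds, while writing 2000000 to
++ * "width" or 400000 to "window" (below the current width) returns
++ * -EINVAL.
++ */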
++
++/*
++ * Function pointers for the "count" debugfs file operations
++ */
++static const struct file_operations count_fops = {
++ .open = debug_count_fopen,
++ .read = debug_count_fread,
++ .write = debug_count_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "enable" debugfs file operations
++ */
++static const struct file_operations enable_fops = {
++ .open = debug_enable_fopen,
++ .read = debug_enable_fread,
++ .write = debug_enable_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "max" debugfs file operations
++ */
++static const struct file_operations max_fops = {
++ .open = debug_max_fopen,
++ .read = debug_max_fread,
++ .write = debug_max_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "sample" debugfs file operations
++ */
++static const struct file_operations sample_fops = {
++ .open = debug_sample_fopen,
++ .read = debug_sample_fread,
++ .release = debug_sample_release,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "threshold" debugfs file operations
++ */
++static const struct file_operations threshold_fops = {
++ .open = debug_threshold_fopen,
++ .read = debug_threshold_fread,
++ .write = debug_threshold_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "width" debugfs file operations
++ */
++static const struct file_operations width_fops = {
++ .open = debug_width_fopen,
++ .read = debug_width_fread,
++ .write = debug_width_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "window" debugfs file operations
++ */
++static const struct file_operations window_fops = {
++ .open = debug_window_fopen,
++ .read = debug_window_fread,
++ .write = debug_window_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/**
++ * init_debugfs - A function to initialize the debugfs interface files
++ *
++ * This function creates entries in debugfs for "hwlat_detector", including
++ * files to read values from the detector, current samples, and the
++ * maximum sample that has been captured since the hardware latency
++ * detector was started.
++ */
++static int init_debugfs(void)
++{
++ int ret = -ENOMEM;
++
++ debug_dir = debugfs_create_dir(DRVNAME, NULL);
++ if (!debug_dir)
++ goto err_debug_dir;
++
++ debug_sample = debugfs_create_file("sample", 0444,
++ debug_dir, NULL,
++ &sample_fops);
++ if (!debug_sample)
++ goto err_sample;
++
++ debug_count = debugfs_create_file("count", 0444,
++ debug_dir, NULL,
++ &count_fops);
++ if (!debug_count)
++ goto err_count;
++
++ debug_max = debugfs_create_file("max", 0444,
++ debug_dir, NULL,
++ &max_fops);
++ if (!debug_max)
++ goto err_max;
++
++ debug_sample_window = debugfs_create_file("window", 0644,
++ debug_dir, NULL,
++ &window_fops);
++ if (!debug_sample_window)
++ goto err_window;
++
++ debug_sample_width = debugfs_create_file("width", 0644,
++ debug_dir, NULL,
++ &width_fops);
++ if (!debug_sample_width)
++ goto err_width;
++
++ debug_threshold = debugfs_create_file("threshold", 0644,
++ debug_dir, NULL,
++ &threshold_fops);
++ if (!debug_threshold)
++ goto err_threshold;
++
++ debug_enable = debugfs_create_file("enable", 0644,
++ debug_dir, &enabled,
++ &enable_fops);
++ if (!debug_enable)
++ goto err_enable;
++
++ ret = 0;
++ goto out;
++
++err_enable:
++ debugfs_remove(debug_threshold);
++err_threshold:
++ debugfs_remove(debug_sample_width);
++err_width:
++ debugfs_remove(debug_sample_window);
++err_window:
++ debugfs_remove(debug_max);
++err_max:
++ debugfs_remove(debug_count);
++err_count:
++ debugfs_remove(debug_sample);
++err_sample:
++ debugfs_remove(debug_dir);
++err_debug_dir:
++out:
++ return ret;
++}
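++
++/*
++ * Resulting layout sketch (mount point assumed; typically
++ * /sys/kernel/debug):
++ *   hwlat_detector/
++ *     sample  count  max                 (0444, read-only)
++ *     window  width  threshold  enable   (0644, read-write)
++ */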
++
++/**
++ * free_debugfs - A function to cleanup the debugfs file interface
++ */
++static void free_debugfs(void)
++{
++ /* could also use a debugfs_remove_recursive */
++ debugfs_remove(debug_enable);
++ debugfs_remove(debug_threshold);
++ debugfs_remove(debug_sample_width);
++ debugfs_remove(debug_sample_window);
++ debugfs_remove(debug_max);
++ debugfs_remove(debug_count);
++ debugfs_remove(debug_sample);
++ debugfs_remove(debug_dir);
++}
++
++/**
++ * detector_init - Standard module initialization code
++ */
++static int detector_init(void)
++{
++ int ret = -ENOMEM;
++
++ pr_info(BANNER "version %s\n", VERSION);
++
++ ret = init_stats();
++ if (0 != ret)
++ goto out;
++
++ ret = init_debugfs();
++ if (0 != ret)
++ goto err_stats;
++
++ if (enabled)
++ ret = start_kthread();
++
++ goto out;
++
++err_stats:
++ ring_buffer_free(ring_buffer);
++out:
++ return ret;
++
++}
++
++/**
++ * detector_exit - Standard module cleanup code
++ */
++static void detector_exit(void)
++{
++ int err;
++
++ if (enabled) {
++ enabled = 0;
++ err = stop_kthread();
++ if (err)
++ pr_err(BANNER "cannot stop kthread\n");
++ }
++
++ free_debugfs();
++ ring_buffer_free(ring_buffer); /* free up the ring buffer */
++
++}
++
++module_init(detector_init);
++module_exit(detector_exit);
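++
++/*
++ * End-to-end usage sketch (hypothetical session; paths as assumed in the
++ * comments above):
++ *   # modprobe hwlat_detector
++ *   # echo 1 > /sys/kernel/debug/hwlat_detector/enable
++ *   # cat /sys/kernel/debug/hwlat_detector/sample   # blocks until a hit
++ */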
+diff -Nur linux-3.18.8.orig/drivers/misc/Kconfig linux-3.18.8/drivers/misc/Kconfig
+--- linux-3.18.8.orig/drivers/misc/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/misc/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -54,6 +54,7 @@
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -69,8 +70,7 @@
+ are combined to make a single 32-bit timer.
+
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
+
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -84,6 +84,15 @@
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
+
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y if !PREEMPT_RT_FULL
++ help
++ Select this to use 32 KiHz base clock rate as TC block clock
++ source for clock events.
++
++
+ config DUMMY_IRQ
+ tristate "Dummy IRQ handler"
+ default n
+@@ -113,6 +122,35 @@
+ for information on the specific driver level and support statement
+ for your IBM server.
+
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++ system firmware external to Linux. We do this using periodic
++ use of stop_machine to grab all available CPUs and measure
++ for unexplainable gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMIs (System Management
++ Interrupts) on x86 systems, though it is not x86 specific. To
++ this end, we default to using a sample window of 1 second,
++ during which we will sample for 0.5 seconds. If an SMI or
++ similar event occurs during that time, it is recorded
++ into an 8K-sample global ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++ environment where high latencies are a concern since the
++ sampling mechanism actually introduces latencies for
++ regular tasks while the CPU(s) are being held.
++
++ If unsure, say N.
++
+ config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+diff -Nur linux-3.18.8.orig/drivers/misc/Makefile linux-3.18.8/drivers/misc/Makefile
+--- linux-3.18.8.orig/drivers/misc/Makefile 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/misc/Makefile 2015-03-03 08:05:18.000000000 +0100
+@@ -38,6 +38,7 @@
+ obj-$(CONFIG_HMC6352) += hmc6352.o
+ obj-y += eeprom/
+ obj-y += cb710/
++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
+ obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
+ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
+ obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
+diff -Nur linux-3.18.8.orig/drivers/mmc/host/mmci.c linux-3.18.8/drivers/mmc/host/mmci.c
+--- linux-3.18.8.orig/drivers/mmc/host/mmci.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/mmc/host/mmci.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1153,15 +1153,12 @@
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ struct variant_data *variant = host->variant;
+ void __iomem *base = host->base;
+- unsigned long flags;
+ u32 status;
+
+ status = readl(base + MMCISTATUS);
+
+ dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
+
+- local_irq_save(flags);
+-
+ do {
+ unsigned int remain, len;
+ char *buffer;
+@@ -1201,8 +1198,6 @@
+
+ sg_miter_stop(sg_miter);
+
+- local_irq_restore(flags);
+-
+ /*
+ * If we have less than the fifo 'half-full' threshold to transfer,
+ * trigger a PIO interrupt as soon as any data is available.
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.8/drivers/net/ethernet/3com/3c59x.c
+--- linux-3.18.8.orig/drivers/net/ethernet/3com/3c59x.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/3com/3c59x.c 2015-03-03 08:05:18.000000000 +0100
+@@ -842,9 +842,9 @@
+ {
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ #endif
+
+@@ -1916,12 +1916,12 @@
+ * Block interrupts because vortex_interrupt does a bare spin_lock()
+ */
+ unsigned long flags;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (vp->full_bus_master_tx)
+ boomerang_interrupt(dev->irq, dev);
+ else
+ vortex_interrupt(dev->irq, dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ }
+
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.8/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+--- linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-03-03 08:05:18.000000000 +0100
+@@ -2213,11 +2213,7 @@
+ }
+
+ tpd_req = atl1c_cal_tpd_req(skb);
+- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
+- if (netif_msg_pktdata(adapter))
+- dev_info(&adapter->pdev->dev, "tx locked\n");
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&adapter->tx_lock, flags);
+
+ if (atl1c_tpd_avail(adapter, type) < tpd_req) {
+ /* no enough descriptor, just stop queue */
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.8/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+--- linux-3.18.8.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1880,8 +1880,7 @@
+ return NETDEV_TX_OK;
+ }
+ tpd_req = atl1e_cal_tdp_req(skb);
+- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+- return NETDEV_TX_LOCKED;
++ spin_lock_irqsave(&adapter->tx_lock, flags);
+
+ if (atl1e_tpd_avail(adapter) < tpd_req) {
+ /* no enough descriptor, just stop queue */
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.8/drivers/net/ethernet/chelsio/cxgb/sge.c
+--- linux-3.18.8.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1663,8 +1663,7 @@
+ struct cmdQ *q = &sge->cmdQ[qid];
+ unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+- if (!spin_trylock(&q->lock))
+- return NETDEV_TX_LOCKED;
++ spin_lock(&q->lock);
+
+ reclaim_completed_tx(sge, q);
+
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.8/drivers/net/ethernet/freescale/gianfar.c
+--- linux-3.18.8.orig/drivers/net/ethernet/freescale/gianfar.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/freescale/gianfar.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1483,7 +1483,7 @@
+
+ if (netif_running(ndev)) {
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ gfar_halt_nodisable(priv);
+@@ -1499,7 +1499,7 @@
+ gfar_write(&regs->maccfg1, tempval);
+
+ unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ disable_napi(priv);
+
+@@ -1541,7 +1541,7 @@
+ /* Disable Magic Packet mode, in case something
+ * else woke us up.
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ tempval = gfar_read(&regs->maccfg2);
+@@ -1551,7 +1551,7 @@
+ gfar_start(priv);
+
+ unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ netif_device_attach(ndev);
+
+@@ -3307,14 +3307,14 @@
+ dev->stats.tx_dropped++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ /* Reactivate the Tx Queues */
+ gfar_write(&regs->tstat, gfargrp->tstat);
+
+ unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ netif_dbg(priv, tx_err, dev, "Transmit Error\n");
+ }
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.8/drivers/net/ethernet/neterion/s2io.c
+--- linux-3.18.8.orig/drivers/net/ethernet/neterion/s2io.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/neterion/s2io.c 2015-03-03 08:05:18.000000000 +0100
+@@ -4084,12 +4084,7 @@
+ [skb->priority & (MAX_TX_FIFOS - 1)];
+ fifo = &mac_control->fifos[queue];
+
+- if (do_spin_lock)
+- spin_lock_irqsave(&fifo->tx_lock, flags);
+- else {
+- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&fifo->tx_lock, flags);
+
+ if (sp->config.multiq) {
+ if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.8/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+--- linux-3.18.8.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-03-03 08:05:18.000000000 +0100
+@@ -2137,10 +2137,8 @@
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ unsigned long flags;
+
+- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+- /* Collision - tell upper layer to requeue */
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&tx_ring->tx_lock, flags);
++
+ if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.8/drivers/net/ethernet/realtek/8139too.c
+--- linux-3.18.8.orig/drivers/net/ethernet/realtek/8139too.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/realtek/8139too.c 2015-03-03 08:05:18.000000000 +0100
+@@ -2215,7 +2215,7 @@
+ struct rtl8139_private *tp = netdev_priv(dev);
+ const int irq = tp->pci_dev->irq;
+
+- disable_irq(irq);
++ disable_irq_nosync(irq);
+ rtl8139_interrupt(irq, dev);
+ enable_irq(irq);
+ }
+diff -Nur linux-3.18.8.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.8/drivers/net/ethernet/tehuti/tehuti.c
+--- linux-3.18.8.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/ethernet/tehuti/tehuti.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1629,13 +1629,8 @@
+ unsigned long flags;
+
+ ENTER;
+- local_irq_save(flags);
+- if (!spin_trylock(&priv->tx_lock)) {
+- local_irq_restore(flags);
+- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
+- BDX_DRV_NAME, ndev->name);
+- return NETDEV_TX_LOCKED;
+- }
++
++ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* build tx descriptor */
+ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
+diff -Nur linux-3.18.8.orig/drivers/net/rionet.c linux-3.18.8/drivers/net/rionet.c
+--- linux-3.18.8.orig/drivers/net/rionet.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/rionet.c 2015-03-03 08:05:18.000000000 +0100
+@@ -174,11 +174,7 @@
+ unsigned long flags;
+ int add_num = 1;
+
+- local_irq_save(flags);
+- if (!spin_trylock(&rnet->tx_lock)) {
+- local_irq_restore(flags);
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&rnet->tx_lock, flags);
+
+ if (is_multicast_ether_addr(eth->h_dest))
+ add_num = nets[rnet->mport->id].nact;
+diff -Nur linux-3.18.8.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.8/drivers/net/wireless/orinoco/orinoco_usb.c
+--- linux-3.18.8.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/net/wireless/orinoco/orinoco_usb.c 2015-03-03 08:05:18.000000000 +0100
+@@ -699,7 +699,7 @@
+ while (!ctx->done.done && msecs--)
+ udelay(1000);
+ } else {
+- wait_event_interruptible(ctx->done.wait,
++ swait_event_interruptible(ctx->done.wait,
+ ctx->done.done);
+ }
+ break;
+diff -Nur linux-3.18.8.orig/drivers/pci/access.c linux-3.18.8/drivers/pci/access.c
+--- linux-3.18.8.orig/drivers/pci/access.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/pci/access.c 2015-03-03 08:05:18.000000000 +0100
+@@ -434,7 +434,7 @@
+ WARN_ON(!dev->block_cfg_access);
+
+ dev->block_cfg_access = 0;
+- wake_up_all(&pci_cfg_wait);
++ wake_up_all_locked(&pci_cfg_wait);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
+diff -Nur linux-3.18.8.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.8/drivers/scsi/fcoe/fcoe.c
+--- linux-3.18.8.orig/drivers/scsi/fcoe/fcoe.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/scsi/fcoe/fcoe.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1286,7 +1286,7 @@
+ struct sk_buff *skb;
+ #ifdef CONFIG_SMP
+ struct fcoe_percpu_s *p0;
+- unsigned targ_cpu = get_cpu();
++ unsigned targ_cpu = get_cpu_light();
+ #endif /* CONFIG_SMP */
+
+ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+@@ -1342,7 +1342,7 @@
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ }
+- put_cpu();
++ put_cpu_light();
+ #else
+ /*
+ * This a non-SMP scenario where the singular Rx thread is
+@@ -1566,11 +1566,11 @@
+ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+ {
+ struct fcoe_percpu_s *fps;
+- int rc;
++ int rc, cpu = get_cpu_light();
+
+- fps = &get_cpu_var(fcoe_percpu);
++ fps = &per_cpu(fcoe_percpu, cpu);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+- put_cpu_var(fcoe_percpu);
++ put_cpu_light();
+
+ return rc;
+ }
+@@ -1768,11 +1768,11 @@
+ return 0;
+ }
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+- put_cpu();
++ put_cpu_light();
+ return -EINVAL;
+ }
+
+@@ -1848,13 +1848,13 @@
+ goto drop;
+
+ if (!fcoe_filter_frames(lport, fp)) {
+- put_cpu();
++ put_cpu_light();
+ fc_exch_recv(lport, fp);
+ return;
+ }
+ drop:
+ stats->ErrorFrames++;
+- put_cpu();
++ put_cpu_light();
+ kfree_skb(skb);
+ }
+
+diff -Nur linux-3.18.8.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.8/drivers/scsi/fcoe/fcoe_ctlr.c
+--- linux-3.18.8.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/scsi/fcoe/fcoe_ctlr.c 2015-03-03 08:05:18.000000000 +0100
+@@ -831,7 +831,7 @@
+
+ INIT_LIST_HEAD(&del_list);
+
+- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
+
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+@@ -867,7 +867,7 @@
+ sel_time = fcf->time;
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+diff -Nur linux-3.18.8.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.8/drivers/scsi/libfc/fc_exch.c
+--- linux-3.18.8.orig/drivers/scsi/libfc/fc_exch.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/scsi/libfc/fc_exch.c 2015-03-03 08:05:18.000000000 +0100
+@@ -816,10 +816,10 @@
+ }
+ memset(ep, 0, sizeof(*ep));
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+- put_cpu();
++ put_cpu_light();
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
+diff -Nur linux-3.18.8.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.8/drivers/scsi/libsas/sas_ata.c
+--- linux-3.18.8.orig/drivers/scsi/libsas/sas_ata.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/scsi/libsas/sas_ata.c 2015-03-03 08:05:18.000000000 +0100
+@@ -191,7 +191,7 @@
+ /* TODO: audit callers to ensure they are ready for qc_issue to
+ * unconditionally re-enable interrupts
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ spin_unlock(ap->lock);
+
+ /* If the device fell off, no sense in issuing commands */
+@@ -261,7 +261,7 @@
+
+ out:
+ spin_lock(ap->lock);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return ret;
+ }
+
+diff -Nur linux-3.18.8.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.8/drivers/scsi/qla2xxx/qla_inline.h
+--- linux-3.18.8.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/scsi/qla2xxx/qla_inline.h 2015-03-03 08:05:18.000000000 +0100
+@@ -59,12 +59,12 @@
+ {
+ unsigned long flags;
+ struct qla_hw_data *ha = rsp->hw;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (IS_P3P_TYPE(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ static inline uint8_t *
+diff -Nur linux-3.18.8.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.8/drivers/tty/serial/8250/8250_core.c
+--- linux-3.18.8.orig/drivers/tty/serial/8250/8250_core.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/tty/serial/8250/8250_core.c 2015-03-03 08:05:18.000000000 +0100
+@@ -37,6 +37,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+ #include <linux/uaccess.h>
+ #include <linux/pm_runtime.h>
+ #ifdef CONFIG_SPARC
+@@ -81,7 +82,16 @@
+ #define DEBUG_INTR(fmt...) do { } while (0)
+ #endif
+
+-#define PASS_LIMIT 512
++/*
++ * On -rt we can have more delays, and legitimately
++ * so - don't drop work spuriously and spam the
++ * syslog:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define PASS_LIMIT 1000000
++#else
++# define PASS_LIMIT 512
++#endif
+
+ #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+@@ -3198,7 +3208,7 @@
+
+ serial8250_rpm_get(up);
+
+- if (port->sysrq || oops_in_progress)
++ if (port->sysrq || oops_in_progress || in_kdb_printk())
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+diff -Nur linux-3.18.8.orig/drivers/tty/serial/amba-pl011.c linux-3.18.8/drivers/tty/serial/amba-pl011.c
+--- linux-3.18.8.orig/drivers/tty/serial/amba-pl011.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/tty/serial/amba-pl011.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1935,13 +1935,19 @@
+
+ clk_enable(uap->clk);
+
+- local_irq_save(flags);
++ /*
++ * local_irq_save(flags);
++ *
++ * This local_irq_save() is nonsense. If we come in via sysrq
++ * handling then interrupts are already disabled. Aside from
++ * that, the port.sysrq check is racy on SMP regardless.
++ */
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&uap->port.lock);
++ locked = spin_trylock_irqsave(&uap->port.lock, flags);
+ else
+- spin_lock(&uap->port.lock);
++ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -1963,8 +1969,7 @@
+ writew(old_cr, uap->port.membase + UART011_CR);
+
+ if (locked)
+- spin_unlock(&uap->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ clk_disable(uap->clk);
+ }
+diff -Nur linux-3.18.8.orig/drivers/tty/serial/omap-serial.c linux-3.18.8/drivers/tty/serial/omap-serial.c
+--- linux-3.18.8.orig/drivers/tty/serial/omap-serial.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/tty/serial/omap-serial.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1270,13 +1270,10 @@
+
+ pm_runtime_get_sync(up->dev);
+
+- local_irq_save(flags);
+- if (up->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+- spin_lock(&up->port.lock);
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1305,8 +1302,7 @@
+ pm_runtime_mark_last_busy(up->dev);
+ pm_runtime_put_autosuspend(up->dev);
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init
+diff -Nur linux-3.18.8.orig/drivers/usb/core/hcd.c linux-3.18.8/drivers/usb/core/hcd.c
+--- linux-3.18.8.orig/drivers/usb/core/hcd.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/usb/core/hcd.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1681,9 +1681,9 @@
+ * and no one may trigger the above deadlock situation when
+ * running complete() in tasklet.
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ urb->complete(urb);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ usb_anchor_resume_wakeups(anchor);
+ atomic_dec(&urb->use_count);
+diff -Nur linux-3.18.8.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.8/drivers/usb/gadget/function/f_fs.c
+--- linux-3.18.8.orig/drivers/usb/gadget/function/f_fs.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/usb/gadget/function/f_fs.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1428,7 +1428,7 @@
+ pr_info("%s(): freeing\n", __func__);
+ ffs_data_clear(ffs);
+ BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
+- waitqueue_active(&ffs->ep0req_completion.wait));
++ swaitqueue_active(&ffs->ep0req_completion.wait));
+ kfree(ffs->dev_name);
+ kfree(ffs);
+ }
+diff -Nur linux-3.18.8.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.8/drivers/usb/gadget/legacy/inode.c
+--- linux-3.18.8.orig/drivers/usb/gadget/legacy/inode.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/drivers/usb/gadget/legacy/inode.c 2015-03-03 08:05:18.000000000 +0100
+@@ -339,7 +339,7 @@
+ spin_unlock_irq (&epdata->dev->lock);
+
+ if (likely (value == 0)) {
+- value = wait_event_interruptible (done.wait, done.done);
++ value = swait_event_interruptible (done.wait, done.done);
+ if (value != 0) {
+ spin_lock_irq (&epdata->dev->lock);
+ if (likely (epdata->ep != NULL)) {
+@@ -348,7 +348,7 @@
+ usb_ep_dequeue (epdata->ep, epdata->req);
+ spin_unlock_irq (&epdata->dev->lock);
+
+- wait_event (done.wait, done.done);
++ swait_event (done.wait, done.done);
+ if (epdata->status == -ECONNRESET)
+ epdata->status = -EINTR;
+ } else {
+diff -Nur linux-3.18.8.orig/fs/aio.c linux-3.18.8/fs/aio.c
+--- linux-3.18.8.orig/fs/aio.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/aio.c 2015-03-03 08:05:18.000000000 +0100
+@@ -40,6 +40,7 @@
+ #include <linux/ramfs.h>
+ #include <linux/percpu-refcount.h>
+ #include <linux/mount.h>
++#include <linux/work-simple.h>
+
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+@@ -110,7 +111,7 @@
+ struct page **ring_pages;
+ long nr_pages;
+
+- struct work_struct free_work;
++ struct swork_event free_work;
+
+ /*
+ * signals when all in-flight requests are done
+@@ -226,6 +227,7 @@
+ .mount = aio_mount,
+ .kill_sb = kill_anon_super,
+ };
++ BUG_ON(swork_get());
+ aio_mnt = kern_mount(&aio_fs);
+ if (IS_ERR(aio_mnt))
+ panic("Failed to create aio fs mount.");
+@@ -505,9 +507,9 @@
+ return cancel(kiocb);
+ }
+
+-static void free_ioctx(struct work_struct *work)
++static void free_ioctx(struct swork_event *sev)
+ {
+- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
+
+ pr_debug("freeing %p\n", ctx);
+
+@@ -526,8 +528,8 @@
+ if (ctx->requests_done)
+ complete(ctx->requests_done);
+
+- INIT_WORK(&ctx->free_work, free_ioctx);
+- schedule_work(&ctx->free_work);
++ INIT_SWORK(&ctx->free_work, free_ioctx);
++ swork_queue(&ctx->free_work);
+ }
+
+ /*
+@@ -535,9 +537,9 @@
+ * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
+ * now it's safe to cancel any that need to be.
+ */
+-static void free_ioctx_users(struct percpu_ref *ref)
++static void free_ioctx_users_work(struct swork_event *sev)
+ {
+- struct kioctx *ctx = container_of(ref, struct kioctx, users);
++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
+ struct kiocb *req;
+
+ spin_lock_irq(&ctx->ctx_lock);
+@@ -556,6 +558,14 @@
+ percpu_ref_put(&ctx->reqs);
+ }
+
++static void free_ioctx_users(struct percpu_ref *ref)
++{
++ struct kioctx *ctx = container_of(ref, struct kioctx, users);
++
++ INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
++ swork_queue(&ctx->free_work);
++}
++
+ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+ {
+ unsigned i, new_nr;
+diff -Nur linux-3.18.8.orig/fs/autofs4/autofs_i.h linux-3.18.8/fs/autofs4/autofs_i.h
+--- linux-3.18.8.orig/fs/autofs4/autofs_i.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/autofs4/autofs_i.h 2015-03-03 08:05:18.000000000 +0100
+@@ -34,6 +34,7 @@
+ #include <linux/sched.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
++#include <linux/delay.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+
+diff -Nur linux-3.18.8.orig/fs/autofs4/expire.c linux-3.18.8/fs/autofs4/expire.c
+--- linux-3.18.8.orig/fs/autofs4/expire.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/autofs4/expire.c 2015-03-03 08:05:18.000000000 +0100
+@@ -151,7 +151,7 @@
+ parent = p->d_parent;
+ if (!spin_trylock(&parent->d_lock)) {
+ spin_unlock(&p->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto relock;
+ }
+ spin_unlock(&p->d_lock);
+diff -Nur linux-3.18.8.orig/fs/buffer.c linux-3.18.8/fs/buffer.c
+--- linux-3.18.8.orig/fs/buffer.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/buffer.c 2015-03-03 08:05:18.000000000 +0100
+@@ -301,8 +301,7 @@
+ * decide that the page is now completely done.
+ */
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -315,8 +314,7 @@
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+
+ /*
+ * If none of the buffers had errors and they are all
+@@ -328,9 +326,7 @@
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+
+ /*
+@@ -358,8 +354,7 @@
+ }
+
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+@@ -371,15 +366,12 @@
+ }
+ tmp = tmp->b_this_page;
+ }
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+ end_page_writeback(page);
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+ EXPORT_SYMBOL(end_buffer_async_write);
+
+@@ -3325,6 +3317,7 @@
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+ if (ret) {
+ INIT_LIST_HEAD(&ret->b_assoc_buffers);
++ buffer_head_init_locks(ret);
+ preempt_disable();
+ __this_cpu_inc(bh_accounting.nr);
+ recalc_bh_state();
+diff -Nur linux-3.18.8.orig/fs/dcache.c linux-3.18.8/fs/dcache.c
+--- linux-3.18.8.orig/fs/dcache.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/dcache.c 2015-03-03 08:05:18.000000000 +0100
+@@ -19,6 +19,7 @@
+ #include <linux/mm.h>
+ #include <linux/fs.h>
+ #include <linux/fsnotify.h>
++#include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/hash.h>
+@@ -552,7 +553,7 @@
+
+ failed:
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ return dentry; /* try again with same dentry */
+ }
+
+@@ -2285,7 +2286,7 @@
+ if (dentry->d_lockref.count == 1) {
+ if (!spin_trylock(&inode->i_lock)) {
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto again;
+ }
+ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
+diff -Nur linux-3.18.8.orig/fs/eventpoll.c linux-3.18.8/fs/eventpoll.c
+--- linux-3.18.8.orig/fs/eventpoll.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/eventpoll.c 2015-03-03 08:05:18.000000000 +0100
+@@ -505,12 +505,12 @@
+ */
+ static void ep_poll_safewake(wait_queue_head_t *wq)
+ {
+- int this_cpu = get_cpu();
++ int this_cpu = get_cpu_light();
+
+ ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
+ ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+diff -Nur linux-3.18.8.orig/fs/exec.c linux-3.18.8/fs/exec.c
+--- linux-3.18.8.orig/fs/exec.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/exec.c 2015-03-03 08:05:18.000000000 +0100
+@@ -841,12 +841,14 @@
+ }
+ }
+ task_lock(tsk);
++ preempt_disable_rt();
+ active_mm = tsk->active_mm;
+ tsk->mm = mm;
+ tsk->active_mm = mm;
+ activate_mm(active_mm, mm);
+ tsk->mm->vmacache_seqnum = 0;
+ vmacache_flush(tsk);
++ preempt_enable_rt();
+ task_unlock(tsk);
+ if (old_mm) {
+ up_read(&old_mm->mmap_sem);
+diff -Nur linux-3.18.8.orig/fs/jbd/checkpoint.c linux-3.18.8/fs/jbd/checkpoint.c
+--- linux-3.18.8.orig/fs/jbd/checkpoint.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/jbd/checkpoint.c 2015-03-03 08:05:18.000000000 +0100
+@@ -129,6 +129,8 @@
+ if (journal->j_flags & JFS_ABORT)
+ return;
+ spin_unlock(&journal->j_state_lock);
++ if (current->plug)
++ io_schedule();
+ mutex_lock(&journal->j_checkpoint_mutex);
+
+ /*
+diff -Nur linux-3.18.8.orig/fs/jbd2/checkpoint.c linux-3.18.8/fs/jbd2/checkpoint.c
+--- linux-3.18.8.orig/fs/jbd2/checkpoint.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/jbd2/checkpoint.c 2015-03-03 08:05:18.000000000 +0100
+@@ -116,6 +116,8 @@
+ nblocks = jbd2_space_needed(journal);
+ while (jbd2_log_space_left(journal) < nblocks) {
+ write_unlock(&journal->j_state_lock);
++ if (current->plug)
++ io_schedule();
+ mutex_lock(&journal->j_checkpoint_mutex);
+
+ /*
+diff -Nur linux-3.18.8.orig/fs/namespace.c linux-3.18.8/fs/namespace.c
+--- linux-3.18.8.orig/fs/namespace.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/namespace.c 2015-03-03 08:05:18.000000000 +0100
+@@ -14,6 +14,7 @@
+ #include <linux/mnt_namespace.h>
+ #include <linux/user_namespace.h>
+ #include <linux/namei.h>
++#include <linux/delay.h>
+ #include <linux/security.h>
+ #include <linux/idr.h>
+ #include <linux/init.h> /* init_rootfs */
+@@ -344,8 +345,11 @@
+ * incremented count after it has set MNT_WRITE_HOLD.
+ */
+ smp_mb();
+- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+- cpu_relax();
++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
++ preempt_enable();
++ cpu_chill();
++ preempt_disable();
++ }
+ /*
+ * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
+ * be set to match its requirements. So we must not load that until
+diff -Nur linux-3.18.8.orig/fs/ntfs/aops.c linux-3.18.8/fs/ntfs/aops.c
+--- linux-3.18.8.orig/fs/ntfs/aops.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/ntfs/aops.c 2015-03-03 08:05:18.000000000 +0100
+@@ -107,8 +107,7 @@
+ "0x%llx.", (unsigned long long)bh->b_blocknr);
+ }
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -123,8 +122,7 @@
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+ /*
+ * If none of the buffers had errors then we can set the page uptodate,
+ * but we first have to perform the post read mst fixups, if the
+@@ -145,13 +143,13 @@
+ recs = PAGE_CACHE_SIZE / rec_size;
+ /* Should have been verified before we got here... */
+ BUG_ON(!recs);
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ kaddr = kmap_atomic(page);
+ for (i = 0; i < recs; i++)
+ post_read_mst_fixup((NTFS_RECORD*)(kaddr +
+ i * rec_size), rec_size);
+ kunmap_atomic(kaddr);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ flush_dcache_page(page);
+ if (likely(page_uptodate && !PageError(page)))
+ SetPageUptodate(page);
+@@ -159,9 +157,7 @@
+ unlock_page(page);
+ return;
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+
+ /**
+diff -Nur linux-3.18.8.orig/fs/timerfd.c linux-3.18.8/fs/timerfd.c
+--- linux-3.18.8.orig/fs/timerfd.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/fs/timerfd.c 2015-03-03 08:05:18.000000000 +0100
+@@ -449,7 +449,10 @@
+ break;
+ }
+ spin_unlock_irq(&ctx->wqh.lock);
+- cpu_relax();
++ if (isalarm(ctx))
++ hrtimer_wait_for_timer(&ctx->t.alarm.timer);
++ else
++ hrtimer_wait_for_timer(&ctx->t.tmr);
+ }
+
+ /*
+diff -Nur linux-3.18.8.orig/include/acpi/platform/aclinux.h linux-3.18.8/include/acpi/platform/aclinux.h
+--- linux-3.18.8.orig/include/acpi/platform/aclinux.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/acpi/platform/aclinux.h 2015-03-03 08:05:18.000000000 +0100
+@@ -123,6 +123,7 @@
+
+ #define acpi_cache_t struct kmem_cache
+ #define acpi_spinlock spinlock_t *
++#define acpi_raw_spinlock raw_spinlock_t *
+ #define acpi_cpu_flags unsigned long
+
+ /* Use native linux version of acpi_os_allocate_zeroed */
+@@ -141,6 +142,20 @@
+ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
+ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
+
++#define acpi_os_create_raw_lock(__handle) \
++({ \
++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
++ \
++ if (lock) { \
++ *(__handle) = lock; \
++ raw_spin_lock_init(*(__handle)); \
++ } \
++ lock ? AE_OK : AE_NO_MEMORY; \
++ })
++
++#define acpi_os_delete_raw_lock(__handle) kfree(__handle)
++
++
+ /*
+ * OSL interfaces used by debugger/disassembler
+ */
+diff -Nur linux-3.18.8.orig/include/asm-generic/bug.h linux-3.18.8/include/asm-generic/bug.h
+--- linux-3.18.8.orig/include/asm-generic/bug.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/asm-generic/bug.h 2015-03-03 08:05:18.000000000 +0100
+@@ -206,6 +206,20 @@
+ # define WARN_ON_SMP(x) ({0;})
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define BUG_ON_RT(c) BUG_ON(c)
++# define BUG_ON_NONRT(c) do { } while (0)
++# define WARN_ON_RT(condition) WARN_ON(condition)
++# define WARN_ON_NONRT(condition) do { } while (0)
++# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
++#else
++# define BUG_ON_RT(c) do { } while (0)
++# define BUG_ON_NONRT(c) BUG_ON(c)
++# define WARN_ON_RT(condition) do { } while (0)
++# define WARN_ON_NONRT(condition) WARN_ON(condition)
++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
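
The new macros let an assertion fire only in the configuration where the invariant is supposed to hold; for instance, code that would be a bug with interrupts off on mainline can be perfectly legal on RT, where the lock involved sleeps. A minimal sketch of the same conditional-assert idea outside the kernel (macro names invented):

/* Sketch: assertions active in only one build configuration,
 * mirroring BUG_ON_RT()/BUG_ON_NONRT() above. */
#include <assert.h>

#ifdef CONFIG_PREEMPT_RT_BASE
# define ASSERT_RT(cond)    assert(cond)   /* checked on RT builds */
# define ASSERT_NONRT(cond) ((void)0)      /* compiled out on RT   */
#else
# define ASSERT_RT(cond)    ((void)0)
# define ASSERT_NONRT(cond) assert(cond)
#endif
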
+diff -Nur linux-3.18.8.orig/include/linux/blkdev.h linux-3.18.8/include/linux/blkdev.h
+--- linux-3.18.8.orig/include/linux/blkdev.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/blkdev.h 2015-03-03 08:05:18.000000000 +0100
+@@ -101,6 +101,7 @@
+ struct list_head queuelist;
+ union {
+ struct call_single_data csd;
++ struct work_struct work;
+ unsigned long fifo_time;
+ };
+
+@@ -478,7 +479,7 @@
+ struct throtl_data *td;
+ #endif
+ struct rcu_head rcu_head;
+- wait_queue_head_t mq_freeze_wq;
++ struct swait_head mq_freeze_wq;
+ struct percpu_ref mq_usage_counter;
+ struct list_head all_q_node;
+
+diff -Nur linux-3.18.8.orig/include/linux/blk-mq.h linux-3.18.8/include/linux/blk-mq.h
+--- linux-3.18.8.orig/include/linux/blk-mq.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/blk-mq.h 2015-03-03 08:05:18.000000000 +0100
+@@ -169,6 +169,7 @@
+
+ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+ struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
++void __blk_mq_complete_request_remote_work(struct work_struct *work);
+
+ void blk_mq_start_request(struct request *rq);
+ void blk_mq_end_request(struct request *rq, int error);
+diff -Nur linux-3.18.8.orig/include/linux/bottom_half.h linux-3.18.8/include/linux/bottom_half.h
+--- linux-3.18.8.orig/include/linux/bottom_half.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/bottom_half.h 2015-03-03 08:05:18.000000000 +0100
+@@ -4,6 +4,17 @@
+ #include <linux/preempt.h>
+ #include <linux/preempt_mask.h>
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++extern void local_bh_disable(void);
++extern void _local_bh_enable(void);
++extern void local_bh_enable(void);
++extern void local_bh_enable_ip(unsigned long ip);
++extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
++extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
++
++#else
++
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+ #else
+@@ -31,5 +42,6 @@
+ {
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+ }
++#endif
+
+ #endif /* _LINUX_BH_H */
+diff -Nur linux-3.18.8.orig/include/linux/buffer_head.h linux-3.18.8/include/linux/buffer_head.h
+--- linux-3.18.8.orig/include/linux/buffer_head.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/buffer_head.h 2015-03-03 08:05:18.000000000 +0100
+@@ -75,8 +75,52 @@
+ struct address_space *b_assoc_map; /* mapping this buffer is
+ associated with */
+ atomic_t b_count; /* users using this buffer_head */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t b_uptodate_lock;
++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++ spinlock_t b_state_lock;
++ spinlock_t b_journal_head_lock;
++#endif
++#endif
+ };
+
++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
++{
++ unsigned long flags;
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++ local_irq_save(flags);
++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
++#else
++ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
++#endif
++ return flags;
++}
++
++static inline void
++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
++ local_irq_restore(flags);
++#else
++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
++#endif
++}
++
++static inline void buffer_head_init_locks(struct buffer_head *bh)
++{
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spin_lock_init(&bh->b_uptodate_lock);
++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++ spin_lock_init(&bh->b_state_lock);
++ spin_lock_init(&bh->b_journal_head_lock);
++#endif
++#endif
++}
++
+ /*
+ * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
+ * and buffer_foo() functions.
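
bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() above collapse the repeated local_irq_save() + bit_spin_lock(BH_Uptodate_Lock, ...) sequences in fs/buffer.c and fs/ntfs/aops.c into one call site, so the RT build can take a real, sleepable spinlock embedded in the buffer_head instead of disabling interrupts around a bit spinlock. The shape of that substitution, as a user-space sketch (type and names invented, initialization omitted):

/* Sketch: one lock API, two implementations chosen at build time. */
#include <pthread.h>

struct object {
#ifdef CONFIG_PREEMPT_RT_BASE
    pthread_mutex_t lock;   /* a real lock: waiters stay preemptible */
#else
    volatile int lockbit;   /* stand-in for an irq-off bit spinlock  */
#endif
};

static void obj_lock(struct object *o)
{
#ifdef CONFIG_PREEMPT_RT_BASE
    pthread_mutex_lock(&o->lock);
#else
    while (__sync_lock_test_and_set(&o->lockbit, 1))
        ;                   /* spin, as bit_spin_lock() would */
#endif
}

static void obj_unlock(struct object *o)
{
#ifdef CONFIG_PREEMPT_RT_BASE
    pthread_mutex_unlock(&o->lock);
#else
    __sync_lock_release(&o->lockbit);
#endif
}
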
+diff -Nur linux-3.18.8.orig/include/linux/cgroup.h linux-3.18.8/include/linux/cgroup.h
+--- linux-3.18.8.orig/include/linux/cgroup.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/cgroup.h 2015-03-03 08:05:18.000000000 +0100
+@@ -22,6 +22,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/kernfs.h>
+ #include <linux/wait.h>
++#include <linux/work-simple.h>
+
+ #ifdef CONFIG_CGROUPS
+
+@@ -91,6 +92,7 @@
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
++ struct swork_event destroy_swork;
+ };
+
+ /* bits in struct cgroup_subsys_state flags field */
+diff -Nur linux-3.18.8.orig/include/linux/completion.h linux-3.18.8/include/linux/completion.h
+--- linux-3.18.8.orig/include/linux/completion.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/completion.h 2015-03-03 08:05:18.000000000 +0100
+@@ -7,8 +7,7 @@
+ * Atomic wait-for-completion handler data structures.
+ * See kernel/sched/completion.c for details.
+ */
+-
+-#include <linux/wait.h>
++#include <linux/wait-simple.h>
+
+ /*
+ * struct completion - structure used to maintain state for a "completion"
+@@ -24,11 +23,11 @@
+ */
+ struct completion {
+ unsigned int done;
+- wait_queue_head_t wait;
++ struct swait_head wait;
+ };
+
+ #define COMPLETION_INITIALIZER(work) \
+- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
++ { 0, SWAIT_HEAD_INITIALIZER((work).wait) }
+
+ #define COMPLETION_INITIALIZER_ONSTACK(work) \
+ ({ init_completion(&work); work; })
+@@ -73,7 +72,7 @@
+ static inline void init_completion(struct completion *x)
+ {
+ x->done = 0;
+- init_waitqueue_head(&x->wait);
++ init_swait_head(&x->wait);
+ }
+
+ /**
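
Switching struct completion from wait_queue_head_t to the simple waitqueue keeps complete() usable from atomic contexts on RT, because swait's internal lock is a raw spinlock; the semantics callers see are unchanged. A user-space model of the done-counter-plus-wait behaviour (the names mirror the kernel API, but this is not kernel code):

/* Sketch: completion semantics as a counter plus a wait. */
#include <pthread.h>

struct completion {
    unsigned int done;
    pthread_mutex_t lock;
    pthread_cond_t wait;
};

static void init_completion(struct completion *x)
{
    x->done = 0;
    pthread_mutex_init(&x->lock, NULL);
    pthread_cond_init(&x->wait, NULL);
}

static void wait_for_completion(struct completion *x)
{
    pthread_mutex_lock(&x->lock);
    while (!x->done)
        pthread_cond_wait(&x->wait, &x->lock);
    x->done--;                 /* consume one completion */
    pthread_mutex_unlock(&x->lock);
}

static void complete(struct completion *x)
{
    pthread_mutex_lock(&x->lock);
    x->done++;
    pthread_cond_signal(&x->wait);
    pthread_mutex_unlock(&x->lock);
}
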
+diff -Nur linux-3.18.8.orig/include/linux/cpu.h linux-3.18.8/include/linux/cpu.h
+--- linux-3.18.8.orig/include/linux/cpu.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/cpu.h 2015-03-03 08:05:18.000000000 +0100
+@@ -217,6 +217,8 @@
+ extern void put_online_cpus(void);
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
++extern void pin_current_cpu(void);
++extern void unpin_current_cpu(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+ #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+@@ -235,6 +237,8 @@
+ #define put_online_cpus() do { } while (0)
+ #define cpu_hotplug_disable() do { } while (0)
+ #define cpu_hotplug_enable() do { } while (0)
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+diff -Nur linux-3.18.8.orig/include/linux/delay.h linux-3.18.8/include/linux/delay.h
+--- linux-3.18.8.orig/include/linux/delay.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/delay.h 2015-03-03 08:05:18.000000000 +0100
+@@ -52,4 +52,10 @@
+ msleep(seconds * 1000);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void cpu_chill(void);
++#else
++# define cpu_chill() cpu_relax()
++#endif
++
+ #endif /* defined(_LINUX_DELAY_H) */
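
cpu_chill() is the RT-safe stand-in for cpu_relax() in retry loops: on mainline it remains the usual pause hint, while the RT implementation sleeps briefly so a preempted lock holder can make progress. Roughly what callers get in each configuration (the 1 ms figure is illustrative; the real RT implementation lives elsewhere in this patch and differs in mechanism, not intent):

/* Sketch: what a cpu_chill() caller gets in each configuration. */
#include <time.h>

#ifdef CONFIG_PREEMPT_RT_FULL
static void cpu_chill_model(void)
{
    struct timespec ts = { 0, 1000000 };    /* block; let the holder run */
    nanosleep(&ts, NULL);
}
#else
static void cpu_chill_model(void)
{
    __asm__ __volatile__("" ::: "memory");  /* cpu_relax(): a spin hint */
}
#endif
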
+diff -Nur linux-3.18.8.orig/include/linux/ftrace_event.h linux-3.18.8/include/linux/ftrace_event.h
+--- linux-3.18.8.orig/include/linux/ftrace_event.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/ftrace_event.h 2015-03-03 08:05:18.000000000 +0100
+@@ -61,6 +61,9 @@
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
++ unsigned short migrate_disable;
++ unsigned short padding;
++ unsigned char preempt_lazy_count;
+ };
+
+ #define FTRACE_MAX_EVENT \
+diff -Nur linux-3.18.8.orig/include/linux/highmem.h linux-3.18.8/include/linux/highmem.h
+--- linux-3.18.8.orig/include/linux/highmem.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/highmem.h 2015-03-03 08:05:18.000000000 +0100
+@@ -7,6 +7,7 @@
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
++#include <linux/sched.h>
+
+ #include <asm/cacheflush.h>
+
+@@ -85,32 +86,51 @@
+
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ DECLARE_PER_CPU(int, __kmap_atomic_idx);
++#endif
+
+ static inline int kmap_atomic_idx_push(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
+-#ifdef CONFIG_DEBUG_HIGHMEM
++# ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ BUG_ON(idx >= KM_TYPE_NR);
+-#endif
++# endif
+ return idx;
++#else
++ current->kmap_idx++;
++ BUG_ON(current->kmap_idx > KM_TYPE_NR);
++ return current->kmap_idx - 1;
++#endif
+ }
+
+ static inline int kmap_atomic_idx(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ return __this_cpu_read(__kmap_atomic_idx) - 1;
++#else
++ return current->kmap_idx - 1;
++#endif
+ }
+
+ static inline void kmap_atomic_idx_pop(void)
+ {
+-#ifdef CONFIG_DEBUG_HIGHMEM
++#ifndef CONFIG_PREEMPT_RT_FULL
++# ifdef CONFIG_DEBUG_HIGHMEM
+ int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
+ BUG_ON(idx < 0);
+-#else
++# else
+ __this_cpu_dec(__kmap_atomic_idx);
++# endif
++#else
++ current->kmap_idx--;
++# ifdef CONFIG_DEBUG_HIGHMEM
++ BUG_ON(current->kmap_idx < 0);
++# endif
+ #endif
+ }
+
+diff -Nur linux-3.18.8.orig/include/linux/hrtimer.h linux-3.18.8/include/linux/hrtimer.h
+--- linux-3.18.8.orig/include/linux/hrtimer.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/hrtimer.h 2015-03-03 08:05:18.000000000 +0100
+@@ -111,6 +111,11 @@
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ unsigned long state;
++ struct list_head cb_entry;
++ int irqsafe;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ ktime_t praecox;
++#endif
+ #ifdef CONFIG_TIMER_STATS
+ int start_pid;
+ void *start_site;
+@@ -147,6 +152,7 @@
+ int index;
+ clockid_t clockid;
+ struct timerqueue_head active;
++ struct list_head expired;
+ ktime_t resolution;
+ ktime_t (*get_time)(void);
+ ktime_t softirq_time;
+@@ -192,6 +198,9 @@
+ unsigned long nr_hangs;
+ ktime_t max_hang_time;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ wait_queue_head_t wait;
++#endif
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ };
+
+@@ -379,6 +388,13 @@
+ return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ }
+
++/* Softirq preemption could deadlock timer removal */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
++#else
++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
++#endif
++
+ /* Query timers: */
+ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+diff -Nur linux-3.18.8.orig/include/linux/idr.h linux-3.18.8/include/linux/idr.h
+--- linux-3.18.8.orig/include/linux/idr.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/idr.h 2015-03-03 08:05:18.000000000 +0100
+@@ -95,10 +95,14 @@
+ * Each idr_preload() should be matched with an invocation of this
+ * function. See idr_preload() for details.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++void idr_preload_end(void);
++#else
+ static inline void idr_preload_end(void)
+ {
+ preempt_enable();
+ }
++#endif
+
+ /**
+ * idr_find - return pointer for given id
+diff -Nur linux-3.18.8.orig/include/linux/init_task.h linux-3.18.8/include/linux/init_task.h
+--- linux-3.18.8.orig/include/linux/init_task.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/init_task.h 2015-03-03 08:05:18.000000000 +0100
+@@ -147,9 +147,16 @@
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ # define INIT_VTIME(tsk) \
+- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
++ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
++ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \
+ .vtime_snap = 0, \
+ .vtime_snap_whence = VTIME_SYS,
+ #else
+@@ -219,6 +226,7 @@
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ INIT_TIMER_LIST \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+diff -Nur linux-3.18.8.orig/include/linux/interrupt.h linux-3.18.8/include/linux/interrupt.h
+--- linux-3.18.8.orig/include/linux/interrupt.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/interrupt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -57,6 +57,7 @@
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ * resume time.
++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
+ */
+ #define IRQF_DISABLED 0x00000020
+ #define IRQF_SHARED 0x00000080
+@@ -70,6 +71,7 @@
+ #define IRQF_FORCE_RESUME 0x00008000
+ #define IRQF_NO_THREAD 0x00010000
+ #define IRQF_EARLY_RESUME 0x00020000
++#define IRQF_NO_SOFTIRQ_CALL 0x00080000
+
+ #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
+
+@@ -180,7 +182,7 @@
+ #ifdef CONFIG_LOCKDEP
+ # define local_irq_enable_in_hardirq() do { } while (0)
+ #else
+-# define local_irq_enable_in_hardirq() local_irq_enable()
++# define local_irq_enable_in_hardirq() local_irq_enable_nort()
+ #endif
+
+ extern void disable_irq_nosync(unsigned int irq);
+@@ -210,6 +212,7 @@
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
++ struct list_head list;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+ };
+@@ -358,9 +361,13 @@
+
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ extern bool force_irqthreads;
++# else
++# define force_irqthreads (true)
++# endif
+ #else
+-#define force_irqthreads (0)
++#define force_irqthreads (false)
+ #endif
+
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+@@ -416,9 +423,10 @@
+ void (*action)(struct softirq_action *);
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-
++static inline void thread_do_softirq(void) { do_softirq(); }
+ #ifdef __ARCH_HAS_DO_SOFTIRQ
+ void do_softirq_own_stack(void);
+ #else
+@@ -427,6 +435,9 @@
+ __do_softirq();
+ }
+ #endif
++#else
++extern void thread_do_softirq(void);
++#endif
+
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+@@ -434,6 +445,7 @@
+
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
++extern void softirq_check_pending_idle(void);
+
+ DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+@@ -455,8 +467,9 @@
+ to be executed on some cpu at least once after this.
+ * If the tasklet is already scheduled, but its execution is still not
+ started, it will be executed only once.
+- * If this tasklet is already running on another CPU (or schedule is called
+- from tasklet itself), it is rescheduled for later.
++ * If this tasklet is already running on another CPU, it is rescheduled
++ for later.
++ * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * Tasklet is strictly serialized wrt itself, but not
+ wrt another tasklets. If client needs some intertask synchronization,
+ he makes it with spinlocks.
+@@ -481,27 +494,36 @@
+ enum
+ {
+ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
+- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
++ TASKLET_STATE_PENDING /* Tasklet is pending */
+ };
+
+-#ifdef CONFIG_SMP
++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
++
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
++static inline int tasklet_tryunlock(struct tasklet_struct *t)
++{
++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
++}
++
+ static inline void tasklet_unlock(struct tasklet_struct *t)
+ {
+ smp_mb__before_atomic();
+ clear_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+-}
++extern void tasklet_unlock_wait(struct tasklet_struct *t);
++
+ #else
+ #define tasklet_trylock(t) 1
++#define tasklet_tryunlock(t) 1
+ #define tasklet_unlock_wait(t) do { } while (0)
+ #define tasklet_unlock(t) do { } while (0)
+ #endif
+@@ -550,17 +572,8 @@
+ smp_mb();
+ }
+
+-static inline void tasklet_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic();
+- atomic_dec(&t->count);
+-}
+-
+-static inline void tasklet_hi_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic();
+- atomic_dec(&t->count);
+-}
++extern void tasklet_enable(struct tasklet_struct *t);
++extern void tasklet_hi_enable(struct tasklet_struct *t);
+
+ extern void tasklet_kill(struct tasklet_struct *t);
+ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+@@ -592,6 +605,12 @@
+ tasklet_kill(&ttimer->tasklet);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void softirq_early_init(void);
++#else
++static inline void softirq_early_init(void) { }
++#endif
++
+ /*
+ * Autoprobing for irqs:
+ *
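
The new tasklet_tryunlock() is the interesting piece of the tasklet rework above: instead of unconditionally clearing TASKLET_STATE_RUN, it atomically swaps the state word from exactly TASKLET_STATEF_RUN to 0, so the unlock fails if a concurrent tasklet_schedule() set SCHED or PENDING in the meantime, and the caller knows it must process the tasklet again rather than lose the request. A sketch of that with C11 atomics:

/* Sketch of tasklet_tryunlock(): unlocking succeeds only if no new
 * schedule request arrived while the tasklet ran. */
#include <stdatomic.h>

#define STATEF_SCHED   (1u << 0)   /* mirrors TASKLET_STATEF_SCHED   */
#define STATEF_RUN     (1u << 1)   /* mirrors TASKLET_STATEF_RUN     */
#define STATEF_PENDING (1u << 2)   /* mirrors TASKLET_STATEF_PENDING */

static int tasklet_tryunlock_model(_Atomic unsigned int *state)
{
    unsigned int expected = STATEF_RUN;

    /* RUN -> 0 only if RUN is the sole bit set; if SCHED or PENDING
     * was set concurrently, the exchange fails and the caller keeps
     * ownership and runs the tasklet again. */
    return atomic_compare_exchange_strong(state, &expected, 0);
}
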
+diff -Nur linux-3.18.8.orig/include/linux/irqdesc.h linux-3.18.8/include/linux/irqdesc.h
+--- linux-3.18.8.orig/include/linux/irqdesc.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/irqdesc.h 2015-03-03 08:05:18.000000000 +0100
+@@ -63,6 +63,7 @@
+ unsigned int irqs_unhandled;
+ atomic_t threads_handled;
+ int threads_handled_last;
++ u64 random_ip;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ #ifdef CONFIG_SMP
+diff -Nur linux-3.18.8.orig/include/linux/irqflags.h linux-3.18.8/include/linux/irqflags.h
+--- linux-3.18.8.orig/include/linux/irqflags.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/irqflags.h 2015-03-03 08:05:18.000000000 +0100
+@@ -25,8 +25,6 @@
+ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+ # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+ # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+ # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+ #else
+ # define trace_hardirqs_on() do { } while (0)
+@@ -39,9 +37,15 @@
+ # define trace_softirqs_enabled(p) 0
+ # define trace_hardirq_enter() do { } while (0)
+ # define trace_hardirq_exit() do { } while (0)
++# define INIT_TRACE_IRQFLAGS
++#endif
++
++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
++#else
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+-# define INIT_TRACE_IRQFLAGS
+ #endif
+
+ #if defined(CONFIG_IRQSOFF_TRACER) || \
+@@ -147,4 +151,23 @@
+
+ #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
++/*
++ * local_irq* variants depending on RT/!RT
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define local_irq_disable_nort() do { } while (0)
++# define local_irq_enable_nort() do { } while (0)
++# define local_irq_save_nort(flags) local_save_flags(flags)
++# define local_irq_restore_nort(flags) (void)(flags)
++# define local_irq_disable_rt() local_irq_disable()
++# define local_irq_enable_rt() local_irq_enable()
++#else
++# define local_irq_disable_nort() local_irq_disable()
++# define local_irq_enable_nort() local_irq_enable()
++# define local_irq_save_nort(flags) local_irq_save(flags)
++# define local_irq_restore_nort(flags) local_irq_restore(flags)
++# define local_irq_disable_rt() do { } while (0)
++# define local_irq_enable_rt() do { } while (0)
++#endif
++
+ #endif
+diff -Nur linux-3.18.8.orig/include/linux/irq.h linux-3.18.8/include/linux/irq.h
+--- linux-3.18.8.orig/include/linux/irq.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/irq.h 2015-03-03 08:05:18.000000000 +0100
+@@ -73,6 +73,7 @@
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ * it from the spurious interrupt detection
+ * mechanism and from core side polling.
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ */
+ enum {
+ IRQ_TYPE_NONE = 0x00000000,
+@@ -98,13 +99,14 @@
+ IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 19),
+ };
+
+ #define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+- IRQ_IS_POLLED)
++ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
+
+ #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+diff -Nur linux-3.18.8.orig/include/linux/irq_work.h linux-3.18.8/include/linux/irq_work.h
+--- linux-3.18.8.orig/include/linux/irq_work.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/irq_work.h 2015-03-03 08:05:18.000000000 +0100
+@@ -16,6 +16,7 @@
+ #define IRQ_WORK_BUSY 2UL
+ #define IRQ_WORK_FLAGS 3UL
+ #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */
+
+ struct irq_work {
+ unsigned long flags;
+diff -Nur linux-3.18.8.orig/include/linux/jbd_common.h linux-3.18.8/include/linux/jbd_common.h
+--- linux-3.18.8.orig/include/linux/jbd_common.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/jbd_common.h 2015-03-03 08:05:18.000000000 +0100
+@@ -15,32 +15,56 @@
+
+ static inline void jbd_lock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_State, &bh->b_state);
++#else
++ spin_lock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_trylock(BH_State, &bh->b_state);
++#else
++ return spin_trylock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_is_locked(BH_State, &bh->b_state);
++#else
++ return spin_is_locked(&bh->b_state_lock);
++#endif
+ }
+
+ static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_State, &bh->b_state);
++#else
++ spin_unlock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
++#else
++ spin_lock(&bh->b_journal_head_lock);
++#endif
+ }
+
+ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
++#else
++ spin_unlock(&bh->b_journal_head_lock);
++#endif
+ }
+
+ #endif
+diff -Nur linux-3.18.8.orig/include/linux/jump_label.h linux-3.18.8/include/linux/jump_label.h
+--- linux-3.18.8.orig/include/linux/jump_label.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/jump_label.h 2015-03-03 08:05:18.000000000 +0100
+@@ -55,7 +55,8 @@
+ "%s used before call to jump_label_init", \
+ __func__)
+
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
++ !defined(CONFIG_PREEMPT_BASE)
+
+ struct static_key {
+ atomic_t enabled;
+diff -Nur linux-3.18.8.orig/include/linux/kdb.h linux-3.18.8/include/linux/kdb.h
+--- linux-3.18.8.orig/include/linux/kdb.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/kdb.h 2015-03-03 08:05:18.000000000 +0100
+@@ -116,7 +116,7 @@
+ extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+ extern __printf(1, 2) int kdb_printf(const char *, ...);
+ typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
+-
++#define in_kdb_printk() (kdb_trap_printk)
+ extern void kdb_init(int level);
+
+ /* Access to kdb specific polling devices */
+@@ -151,6 +151,7 @@
+ extern int kdb_unregister(char *);
+ #else /* ! CONFIG_KGDB_KDB */
+ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
++#define in_kdb_printk() (0)
+ static inline void kdb_init(int level) {}
+ static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen) { return 0; }
+diff -Nur linux-3.18.8.orig/include/linux/kernel.h linux-3.18.8/include/linux/kernel.h
+--- linux-3.18.8.orig/include/linux/kernel.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/kernel.h 2015-03-03 08:05:18.000000000 +0100
+@@ -451,6 +451,7 @@
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
++ SYSTEM_SUSPEND,
+ } system_state;
+
+ #define TAINT_PROPRIETARY_MODULE 0
+diff -Nur linux-3.18.8.orig/include/linux/lglock.h linux-3.18.8/include/linux/lglock.h
+--- linux-3.18.8.orig/include/linux/lglock.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/lglock.h 2015-03-03 08:05:18.000000000 +0100
+@@ -34,22 +34,39 @@
+ #endif
+
+ struct lglock {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ arch_spinlock_t __percpu *lock;
++#else
++ struct rt_mutex __percpu *lock;
++#endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_key;
+ struct lockdep_map lock_dep_map;
+ #endif
+ };
+
+-#define DEFINE_LGLOCK(name) \
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define DEFINE_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ struct lglock name = { .lock = &name ## _lock }
+
+-#define DEFINE_STATIC_LGLOCK(name) \
++# define DEFINE_STATIC_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static struct lglock name = { .lock = &name ## _lock }
++#else
++
++# define DEFINE_LGLOCK(name) \
++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
++ = __RT_MUTEX_INITIALIZER( name ## _lock); \
++ struct lglock name = { .lock = &name ## _lock }
++
++# define DEFINE_STATIC_LGLOCK(name) \
++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
++ = __RT_MUTEX_INITIALIZER( name ## _lock); \
++ static struct lglock name = { .lock = &name ## _lock }
++#endif
+
+ void lg_lock_init(struct lglock *lg, char *name);
+ void lg_local_lock(struct lglock *lg);
+@@ -59,6 +76,12 @@
+ void lg_global_lock(struct lglock *lg);
+ void lg_global_unlock(struct lglock *lg);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define lg_global_trylock_relax(name) lg_global_lock(name)
++#else
++void lg_global_trylock_relax(struct lglock *lg);
++#endif
++
+ #else
+ /* When !CONFIG_SMP, map lglock to spinlock */
+ #define lglock spinlock
+diff -Nur linux-3.18.8.orig/include/linux/list_bl.h linux-3.18.8/include/linux/list_bl.h
+--- linux-3.18.8.orig/include/linux/list_bl.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/list_bl.h 2015-03-03 08:05:18.000000000 +0100
+@@ -2,6 +2,7 @@
+ #define _LINUX_LIST_BL_H
+
+ #include <linux/list.h>
++#include <linux/spinlock.h>
+ #include <linux/bit_spinlock.h>
+
+ /*
+@@ -32,13 +33,22 @@
+
+ struct hlist_bl_head {
+ struct hlist_bl_node *first;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ raw_spinlock_t lock;
++#endif
+ };
+
+ struct hlist_bl_node {
+ struct hlist_bl_node *next, **pprev;
+ };
+-#define INIT_HLIST_BL_HEAD(ptr) \
+- ((ptr)->first = NULL)
++
++static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
++{
++ h->first = NULL;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ raw_spin_lock_init(&h->lock);
++#endif
++}
+
+ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
+ {
+@@ -117,12 +127,26 @@
+
+ static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(0, (unsigned long *)b);
++#else
++ raw_spin_lock(&b->lock);
++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
++ __set_bit(0, (unsigned long *)b);
++#endif
++#endif
+ }
+
+ static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ __bit_spin_unlock(0, (unsigned long *)b);
++#else
++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
++ __clear_bit(0, (unsigned long *)b);
++#endif
++ raw_spin_unlock(&b->lock);
++#endif
+ }
+
+ static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
+diff -Nur linux-3.18.8.orig/include/linux/locallock.h linux-3.18.8/include/linux/locallock.h
+--- linux-3.18.8.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/locallock.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,270 @@
++#ifndef _LINUX_LOCALLOCK_H
++#define _LINUX_LOCALLOCK_H
++
++#include <linux/percpu.h>
++#include <linux/spinlock.h>
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define LL_WARN(cond) WARN_ON(cond)
++#else
++# define LL_WARN(cond) do { } while (0)
++#endif
++
++/*
++ * per cpu lock based substitute for local_irq_*()
++ */
++struct local_irq_lock {
++ spinlock_t lock;
++ struct task_struct *owner;
++ int nestcnt;
++ unsigned long flags;
++};
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
++
++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \
++ DECLARE_PER_CPU(struct local_irq_lock, lvar)
++
++#define local_irq_lock_init(lvar) \
++ do { \
++ int __cpu; \
++ for_each_possible_cpu(__cpu) \
++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
++ } while (0)
++
++/*
++ * spin_lock|trylock|unlock_local flavour that does not migrate disable
++ * used for __local_lock|trylock|unlock where get_local_var/put_local_var
++ * already takes care of the migrate_disable/enable
++ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define spin_lock_local(lock) rt_spin_lock(lock)
++# define spin_trylock_local(lock) rt_spin_trylock(lock)
++# define spin_unlock_local(lock) rt_spin_unlock(lock)
++#else
++# define spin_lock_local(lock) spin_lock(lock)
++# define spin_trylock_local(lock) spin_trylock(lock)
++# define spin_unlock_local(lock) spin_unlock(lock)
++#endif
++
++static inline void __local_lock(struct local_irq_lock *lv)
++{
++ if (lv->owner != current) {
++ spin_lock_local(&lv->lock);
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ }
++ lv->nestcnt++;
++}
++
++#define local_lock(lvar) \
++ do { __local_lock(&get_local_var(lvar)); } while (0)
++
++static inline int __local_trylock(struct local_irq_lock *lv)
++{
++ if (lv->owner != current && spin_trylock_local(&lv->lock)) {
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ lv->nestcnt = 1;
++ return 1;
++ }
++ return 0;
++}
++
++#define local_trylock(lvar) \
++ ({ \
++ int __locked; \
++ __locked = __local_trylock(&get_local_var(lvar)); \
++ if (!__locked) \
++ put_local_var(lvar); \
++ __locked; \
++ })
++
++static inline void __local_unlock(struct local_irq_lock *lv)
++{
++ LL_WARN(lv->nestcnt == 0);
++ LL_WARN(lv->owner != current);
++ if (--lv->nestcnt)
++ return;
++
++ lv->owner = NULL;
++ spin_unlock_local(&lv->lock);
++}
++
++#define local_unlock(lvar) \
++ do { \
++ __local_unlock(&__get_cpu_var(lvar)); \
++ put_local_var(lvar); \
++ } while (0)
++
++static inline void __local_lock_irq(struct local_irq_lock *lv)
++{
++ spin_lock_irqsave(&lv->lock, lv->flags);
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ lv->nestcnt = 1;
++}
++
++#define local_lock_irq(lvar) \
++ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
++
++#define local_lock_irq_on(lvar, cpu) \
++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
++
++static inline void __local_unlock_irq(struct local_irq_lock *lv)
++{
++ LL_WARN(!lv->nestcnt);
++ LL_WARN(lv->owner != current);
++ lv->owner = NULL;
++ lv->nestcnt = 0;
++ spin_unlock_irq(&lv->lock);
++}
++
++#define local_unlock_irq(lvar) \
++ do { \
++ __local_unlock_irq(&__get_cpu_var(lvar)); \
++ put_local_var(lvar); \
++ } while (0)
++
++#define local_unlock_irq_on(lvar, cpu) \
++ do { \
++ __local_unlock_irq(&per_cpu(lvar, cpu)); \
++ } while (0)
++
++static inline int __local_lock_irqsave(struct local_irq_lock *lv)
++{
++ if (lv->owner != current) {
++ __local_lock_irq(lv);
++ return 0;
++ } else {
++ lv->nestcnt++;
++ return 1;
++ }
++}
++
++#define local_lock_irqsave(lvar, _flags) \
++ do { \
++ if (__local_lock_irqsave(&get_local_var(lvar))) \
++ put_local_var(lvar); \
++ _flags = __get_cpu_var(lvar).flags; \
++ } while (0)
++
++#define local_lock_irqsave_on(lvar, _flags, cpu) \
++ do { \
++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \
++ _flags = per_cpu(lvar, cpu).flags; \
++ } while (0)
++
++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
++ unsigned long flags)
++{
++ LL_WARN(!lv->nestcnt);
++ LL_WARN(lv->owner != current);
++ if (--lv->nestcnt)
++ return 0;
++
++ lv->owner = NULL;
++ spin_unlock_irqrestore(&lv->lock, lv->flags);
++ return 1;
++}
++
++#define local_unlock_irqrestore(lvar, flags) \
++ do { \
++ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
++ put_local_var(lvar); \
++ } while (0)
++
++#define local_unlock_irqrestore_on(lvar, flags, cpu) \
++ do { \
++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
++ } while (0)
++
++#define local_spin_trylock_irq(lvar, lock) \
++ ({ \
++ int __locked; \
++ local_lock_irq(lvar); \
++ __locked = spin_trylock(lock); \
++ if (!__locked) \
++ local_unlock_irq(lvar); \
++ __locked; \
++ })
++
++#define local_spin_lock_irq(lvar, lock) \
++ do { \
++ local_lock_irq(lvar); \
++ spin_lock(lock); \
++ } while (0)
++
++#define local_spin_unlock_irq(lvar, lock) \
++ do { \
++ spin_unlock(lock); \
++ local_unlock_irq(lvar); \
++ } while (0)
++
++#define local_spin_lock_irqsave(lvar, lock, flags) \
++ do { \
++ local_lock_irqsave(lvar, flags); \
++ spin_lock(lock); \
++ } while (0)
++
++#define local_spin_unlock_irqrestore(lvar, lock, flags) \
++ do { \
++ spin_unlock(lock); \
++ local_unlock_irqrestore(lvar, flags); \
++ } while (0)
++
++#define get_locked_var(lvar, var) \
++ (*({ \
++ local_lock(lvar); \
++ &__get_cpu_var(var); \
++ }))
++
++#define put_locked_var(lvar, var) local_unlock(lvar);
++
++#define local_lock_cpu(lvar) \
++ ({ \
++ local_lock(lvar); \
++ smp_processor_id(); \
++ })
++
++#define local_unlock_cpu(lvar) local_unlock(lvar)
++
++#else /* PREEMPT_RT_BASE */
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar
++
++static inline void local_irq_lock_init(int lvar) { }
++
++#define local_lock(lvar) preempt_disable()
++#define local_unlock(lvar) preempt_enable()
++#define local_lock_irq(lvar) local_irq_disable()
++#define local_unlock_irq(lvar) local_irq_enable()
++#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
++
++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
++#define local_spin_lock_irqsave(lvar, lock, flags) \
++ spin_lock_irqsave(lock, flags)
++#define local_spin_unlock_irqrestore(lvar, lock, flags) \
++ spin_unlock_irqrestore(lock, flags)
++
++#define get_locked_var(lvar, var) get_cpu_var(var)
++#define put_locked_var(lvar, var) put_cpu_var(var)
++
++#define local_lock_cpu(lvar) get_cpu()
++#define local_unlock_cpu(lvar) put_cpu()
++
++#endif
++
++#endif
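
include/linux/locallock.h is the heart of this patch's per-CPU locking story: on RT, local_lock() takes a real spinlock carrying an owner pointer and a nesting count, so the same task re-enters cheaply and other tasks sleep instead of having interrupts or preemption shut off; on !RT the whole API decays to plain preempt/irq disabling, as the bottom half of the file shows. The owner/nestcnt logic of __local_lock()/__local_unlock(), modelled in user space (the unsynchronized owner check mirrors the kernel code, where it is safe because only the owner writes those fields while holding the lock):

/* Sketch: an owner-tracking, recursion-counting lock; pthreads stand
 * in for spinlock_t. */
#include <pthread.h>

struct local_lock_model {
    pthread_mutex_t lock;
    pthread_t owner;
    int owned;          /* whether 'owner' is currently valid */
    int nestcnt;
};

static void local_lock_acquire(struct local_lock_model *lv)
{
    /* A non-owner misreading owner/owned merely takes the slow path. */
    if (!lv->owned || !pthread_equal(lv->owner, pthread_self())) {
        pthread_mutex_lock(&lv->lock);
        lv->owner = pthread_self();
        lv->owned = 1;
    }
    lv->nestcnt++;      /* the same task nests without re-locking */
}

static void local_lock_release(struct local_lock_model *lv)
{
    if (--lv->nestcnt)
        return;         /* still nested, keep the lock */
    lv->owned = 0;
    pthread_mutex_unlock(&lv->lock);
}
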
+diff -Nur linux-3.18.8.orig/include/linux/mm_types.h linux-3.18.8/include/linux/mm_types.h
+--- linux-3.18.8.orig/include/linux/mm_types.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/mm_types.h 2015-03-03 08:05:18.000000000 +0100
+@@ -11,6 +11,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/page-debug-flags.h>
++#include <linux/rcupdate.h>
+ #include <linux/uprobes.h>
+ #include <linux/page-flags-layout.h>
+ #include <asm/page.h>
+@@ -454,6 +455,9 @@
+ bool tlb_flush_pending;
+ #endif
+ struct uprobes_state uprobes_state;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head delayed_drop;
++#endif
+ };
+
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+diff -Nur linux-3.18.8.orig/include/linux/mutex.h linux-3.18.8/include/linux/mutex.h
+--- linux-3.18.8.orig/include/linux/mutex.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/mutex.h 2015-03-03 08:05:18.000000000 +0100
+@@ -19,6 +19,17 @@
+ #include <asm/processor.h>
+ #include <linux/osq_lock.h>
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { .name = #lockname }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -100,13 +111,6 @@
+ static inline void mutex_destroy(struct mutex *lock) {}
+ #endif
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { .name = #lockname }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -174,6 +178,8 @@
+ extern int mutex_trylock(struct mutex *lock);
+ extern void mutex_unlock(struct mutex *lock);
+
++#endif /* !PREEMPT_RT_FULL */
++
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+ #endif /* __LINUX_MUTEX_H */
+diff -Nur linux-3.18.8.orig/include/linux/mutex_rt.h linux-3.18.8/include/linux/mutex_rt.h
+--- linux-3.18.8.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/mutex_rt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,84 @@
++#ifndef __LINUX_MUTEX_RT_H
++#define __LINUX_MUTEX_RT_H
++
++#ifndef __LINUX_MUTEX_H
++#error "Please include mutex.h"
++#endif
++
++#include <linux/rtmutex.h>
++
++/* FIXME: Just for __lockfunc */
++#include <linux/spinlock.h>
++
++struct mutex {
++ struct rt_mutex lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __MUTEX_INITIALIZER(mutexname) \
++ { \
++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
++ }
++
++#define DEFINE_MUTEX(mutexname) \
++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l) _mutex_lock(l)
++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
++#define mutex_lock_killable(l) _mutex_lock_killable(l)
++#define mutex_trylock(l) _mutex_trylock(l)
++#define mutex_unlock(l) _mutex_unlock(l)
++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock) \
++do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s) _mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++#endif
++
++# define mutex_init(mutex) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), #mutex, &__key); \
++} while (0)
++
++# define __mutex_init(mutex, name, key) \
++do { \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), name, key); \
++} while (0)
++
++#endif
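
On PREEMPT_RT_FULL, struct mutex becomes a wrapper around rt_mutex so that waiters can priority-boost the owner; the macro layer above preserves the stock API, which means call sites need no changes at all. A kernel-style usage sketch (only meaningful in a tree with this patch applied; example_lock is an invented name):

/* Sketch: the same caller code builds against either implementation. */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);  /* rt_mutex-backed on PREEMPT_RT_FULL */

static void touch_shared_state(void)
{
    mutex_lock(&example_lock);      /* may priority-boost the owner on RT */
    /* ... critical section ... */
    mutex_unlock(&example_lock);
}
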
+diff -Nur linux-3.18.8.orig/include/linux/netdevice.h linux-3.18.8/include/linux/netdevice.h
+--- linux-3.18.8.orig/include/linux/netdevice.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/netdevice.h 2015-03-03 08:05:18.000000000 +0100
+@@ -2345,6 +2345,7 @@
+ unsigned int dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
++ struct sk_buff_head tofree_queue;
+
+ #ifdef CONFIG_NET_FLOW_LIMIT
+ struct sd_flow_limit __rcu *flow_limit;
+diff -Nur linux-3.18.8.orig/include/linux/netfilter/x_tables.h linux-3.18.8/include/linux/netfilter/x_tables.h
+--- linux-3.18.8.orig/include/linux/netfilter/x_tables.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/netfilter/x_tables.h 2015-03-03 08:05:18.000000000 +0100
+@@ -3,6 +3,7 @@
+
+
+ #include <linux/netdevice.h>
++#include <linux/locallock.h>
+ #include <uapi/linux/netfilter/x_tables.h>
+
+ /**
+@@ -282,6 +283,8 @@
+ */
+ DECLARE_PER_CPU(seqcount_t, xt_recseq);
+
++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
++
+ /**
+ * xt_write_recseq_begin - start of a write section
+ *
+@@ -296,6 +299,9 @@
+ {
+ unsigned int addend;
+
++ /* RT protection */
++ local_lock(xt_write_lock);
++
+ /*
+ * Low order bit of sequence is set if we already
+ * called xt_write_recseq_begin().
+@@ -326,6 +332,7 @@
+ /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
+ smp_wmb();
+ __this_cpu_add(xt_recseq.sequence, addend);
++ local_unlock(xt_write_lock);
+ }
+
+ /*
+diff -Nur linux-3.18.8.orig/include/linux/notifier.h linux-3.18.8/include/linux/notifier.h
+--- linux-3.18.8.orig/include/linux/notifier.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/notifier.h 2015-03-03 08:05:18.000000000 +0100
+@@ -6,7 +6,7 @@
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+-
++
+ #ifndef _LINUX_NOTIFIER_H
+ #define _LINUX_NOTIFIER_H
+ #include <linux/errno.h>
+@@ -42,9 +42,7 @@
+ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
+ * As compensation, srcu_notifier_chain_unregister() is rather expensive.
+ * SRCU notifier chains should be used when the chain will be called very
+- * often but notifier_blocks will seldom be removed. Also, SRCU notifier
+- * chains are slightly more difficult to use because they require special
+- * runtime initialization.
++ * often but notifier_blocks will seldom be removed.
+ */
+
+ typedef int (*notifier_fn_t)(struct notifier_block *nb,
+@@ -88,7 +86,7 @@
+ (name)->head = NULL; \
+ } while (0)
+
+-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
++/* srcu_notifier_heads must be cleaned up dynamically */
+ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+ #define srcu_cleanup_notifier_head(name) \
+ cleanup_srcu_struct(&(name)->srcu);
+@@ -101,7 +99,13 @@
+ .head = NULL }
+ #define RAW_NOTIFIER_INIT(name) { \
+ .head = NULL }
+-/* srcu_notifier_heads cannot be initialized statically */
++
++#define SRCU_NOTIFIER_INIT(name, pcpu) \
++ { \
++ .mutex = __MUTEX_INITIALIZER(name.mutex), \
++ .head = NULL, \
++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
++ }
+
+ #define ATOMIC_NOTIFIER_HEAD(name) \
+ struct atomic_notifier_head name = \
+@@ -113,6 +117,18 @@
+ struct raw_notifier_head name = \
+ RAW_NOTIFIER_INIT(name)
+
++#define _SRCU_NOTIFIER_HEAD(name, mod) \
++ static DEFINE_PER_CPU(struct srcu_struct_array, \
++ name##_head_srcu_array); \
++ mod struct srcu_notifier_head name = \
++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
++
++#define SRCU_NOTIFIER_HEAD(name) \
++ _SRCU_NOTIFIER_HEAD(name, )
++
++#define SRCU_NOTIFIER_HEAD_STATIC(name) \
++ _SRCU_NOTIFIER_HEAD(name, static)
++
+ #ifdef __KERNEL__
+
+ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+@@ -182,12 +198,12 @@
+
+ /*
+ * Declared notifiers so far. I can imagine quite a few more chains
+- * over time (eg laptop power reset chains, reboot chain (to clean
++ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+- * low memory chain, screenblank chain (for plug in modular screenblankers)
++ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+-
++
+ /* CPU notfiers are defined in include/linux/cpu.h. */
+
+ /* netdevice notifiers are defined in include/linux/netdevice.h */
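
The SRCU_NOTIFIER_HEAD()/SRCU_NOTIFIER_HEAD_STATIC() macros remove the one wart the old comment described: SRCU notifier heads previously had to be initialized at runtime with srcu_init_notifier_head(), while every other chain type could be defined statically. A kernel-style sketch against the patched header (my_chain, my_cb, and my_register are invented names):

/* Sketch: a statically defined SRCU notifier chain, possible only with
 * the macros added above. */
#include <linux/notifier.h>

SRCU_NOTIFIER_HEAD_STATIC(my_chain);

static int my_cb(struct notifier_block *nb, unsigned long event, void *data)
{
    return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_cb };

static void my_register(void)
{
    /* no srcu_init_notifier_head() call needed first */
    srcu_notifier_chain_register(&my_chain, &my_nb);
}
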
+diff -Nur linux-3.18.8.orig/include/linux/percpu.h linux-3.18.8/include/linux/percpu.h
+--- linux-3.18.8.orig/include/linux/percpu.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/percpu.h 2015-03-03 08:05:18.000000000 +0100
+@@ -23,6 +23,35 @@
+ PERCPU_MODULE_RESERVE)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++#define get_local_var(var) (*({ \
++ migrate_disable(); \
++ &__get_cpu_var(var); }))
++
++#define put_local_var(var) do { \
++ (void)&(var); \
++ migrate_enable(); \
++} while (0)
++
++# define get_local_ptr(var) ({ \
++ migrate_disable(); \
++ this_cpu_ptr(var); })
++
++# define put_local_ptr(var) do { \
++ (void)(var); \
++ migrate_enable(); \
++} while (0)
++
++#else
++
++#define get_local_var(var) get_cpu_var(var)
++#define put_local_var(var) put_cpu_var(var)
++#define get_local_ptr(var) get_cpu_ptr(var)
++#define put_local_ptr(var) put_cpu_ptr(var)
++
++#endif
++
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+
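
get_local_var()/put_local_var() are the RT-friendly counterparts of get_cpu_var()/put_cpu_var(): instead of disabling preemption they only disable migration, so the task keeps a stable per-CPU pointer yet can still be preempted by a higher-priority task. A kernel-style usage sketch against the patched headers (my_counter is a made-up example variable):

/* Sketch: per-CPU access that stays preemptible on RT. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_local_counter(void)
{
    unsigned long *p = &get_local_var(my_counter); /* migrate_disable() on RT */

    (*p)++;                     /* safe: this task cannot change CPUs here */
    put_local_var(my_counter);  /* migrate_enable() */
}
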
+diff -Nur linux-3.18.8.orig/include/linux/pid.h linux-3.18.8/include/linux/pid.h
+--- linux-3.18.8.orig/include/linux/pid.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/pid.h 2015-03-03 08:05:18.000000000 +0100
+@@ -2,6 +2,7 @@
+ #define _LINUX_PID_H
+
+ #include <linux/rcupdate.h>
++#include <linux/atomic.h>
+
+ enum pid_type
+ {
+diff -Nur linux-3.18.8.orig/include/linux/preempt.h linux-3.18.8/include/linux/preempt.h
+--- linux-3.18.8.orig/include/linux/preempt.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/preempt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -33,6 +33,20 @@
+ #define preempt_count_inc() preempt_count_add(1)
+ #define preempt_count_dec() preempt_count_sub(1)
+
++#ifdef CONFIG_PREEMPT_LAZY
++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
++#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
++#else
++#define add_preempt_lazy_count(val) do { } while (0)
++#define sub_preempt_lazy_count(val) do { } while (0)
++#define inc_preempt_lazy_count() do { } while (0)
++#define dec_preempt_lazy_count() do { } while (0)
++#define preempt_lazy_count() (0)
++#endif
++
+ #ifdef CONFIG_PREEMPT_COUNT
+
+ #define preempt_disable() \
+@@ -41,13 +55,25 @@
+ barrier(); \
+ } while (0)
+
++#define preempt_lazy_disable() \
++do { \
++ inc_preempt_lazy_count(); \
++ barrier(); \
++} while (0)
++
+ #define sched_preempt_enable_no_resched() \
+ do { \
+ barrier(); \
+ preempt_count_dec(); \
+ } while (0)
+
+-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() preempt_check_resched()
++#else
++# define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() barrier()
++#endif
+
+ #ifdef CONFIG_PREEMPT
+ #define preempt_enable() \
+@@ -63,6 +89,13 @@
+ __preempt_schedule(); \
+ } while (0)
+
++#define preempt_lazy_enable() \
++do { \
++ dec_preempt_lazy_count(); \
++ barrier(); \
++ preempt_check_resched(); \
++} while (0)
++
+ #else
+ #define preempt_enable() \
+ do { \
+@@ -121,6 +154,7 @@
+ #define preempt_disable_notrace() barrier()
+ #define preempt_enable_no_resched_notrace() barrier()
+ #define preempt_enable_notrace() barrier()
++#define preempt_check_resched_rt() barrier()
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+@@ -140,10 +174,31 @@
+ } while (0)
+ #define preempt_fold_need_resched() \
+ do { \
+- if (tif_need_resched()) \
++ if (tif_need_resched_now()) \
+ set_preempt_need_resched(); \
+ } while (0)
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define preempt_disable_rt() preempt_disable()
++# define preempt_enable_rt() preempt_enable()
++# define preempt_disable_nort() barrier()
++# define preempt_enable_nort() barrier()
++# ifdef CONFIG_SMP
++ extern void migrate_disable(void);
++ extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++# define migrate_disable() barrier()
++# define migrate_enable() barrier()
++# endif /* CONFIG_SMP */
++#else
++# define preempt_disable_rt() barrier()
++# define preempt_enable_rt() barrier()
++# define preempt_disable_nort() preempt_disable()
++# define preempt_enable_nort() preempt_enable()
++# define migrate_disable() preempt_disable()
++# define migrate_enable() preempt_enable()
++#endif
++
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+
+ struct preempt_notifier;
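The migrate_disable()/migrate_enable() pair declared above is the RT replacement for many preempt_disable() sections: the task may still be preempted but cannot change CPUs. A minimal sketch of the pattern (the function name is illustrative):

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void touch_this_cpu_state(void)
{
	migrate_disable();		/* plain preempt_disable() on !RT */
	/* smp_processor_id() is stable here: the task cannot migrate,
	 * yet it may sleep on RT because preemption stays enabled. */
	pr_debug("on cpu %d\n", smp_processor_id());
	migrate_enable();
}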
+diff -Nur linux-3.18.8.orig/include/linux/preempt_mask.h linux-3.18.8/include/linux/preempt_mask.h
+--- linux-3.18.8.orig/include/linux/preempt_mask.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/preempt_mask.h 2015-03-03 08:05:18.000000000 +0100
+@@ -44,16 +44,26 @@
+ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+ #define NMI_OFFSET (1UL << NMI_SHIFT)
+
+-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#else
++# define SOFTIRQ_DISABLE_OFFSET (0)
++#endif
+
+ #define PREEMPT_ACTIVE_BITS 1
+ #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+ #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
++#else
++# define softirq_count() (0UL)
++extern int in_serving_softirq(void);
++#endif
+
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+@@ -64,7 +74,6 @@
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+ /*
+ * Are we in NMI context?
+diff -Nur linux-3.18.8.orig/include/linux/printk.h linux-3.18.8/include/linux/printk.h
+--- linux-3.18.8.orig/include/linux/printk.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/printk.h 2015-03-03 08:05:18.000000000 +0100
+@@ -119,9 +119,11 @@
+ extern asmlinkage __printf(1, 2)
+ void early_printk(const char *fmt, ...);
+ void early_vprintk(const char *fmt, va_list ap);
++extern void printk_kill(void);
+ #else
+ static inline __printf(1, 2) __cold
+ void early_printk(const char *s, ...) { }
++static inline void printk_kill(void) { }
+ #endif
+
+ #ifdef CONFIG_PRINTK
+@@ -155,7 +157,6 @@
+ #define printk_ratelimit() __printk_ratelimit(__func__)
+ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ unsigned int interval_msec);
+-
+ extern int printk_delay_msec;
+ extern int dmesg_restrict;
+ extern int kptr_restrict;
+diff -Nur linux-3.18.8.orig/include/linux/radix-tree.h linux-3.18.8/include/linux/radix-tree.h
+--- linux-3.18.8.orig/include/linux/radix-tree.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/radix-tree.h 2015-03-03 08:05:18.000000000 +0100
+@@ -277,8 +277,13 @@
+ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+ void ***results, unsigned long *indices,
+ unsigned long first_index, unsigned int max_items);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int radix_tree_preload(gfp_t gfp_mask);
+ int radix_tree_maybe_preload(gfp_t gfp_mask);
++#else
++static inline int radix_tree_preload(gfp_t gm) { return 0; }
++static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
++#endif
+ void radix_tree_init(void);
+ void *radix_tree_tag_set(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+@@ -303,7 +308,7 @@
+
+ static inline void radix_tree_preload_end(void)
+ {
+- preempt_enable();
++ preempt_enable_nort();
+ }
+
+ /**
+diff -Nur linux-3.18.8.orig/include/linux/random.h linux-3.18.8/include/linux/random.h
+--- linux-3.18.8.orig/include/linux/random.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/random.h 2015-03-03 08:05:18.000000000 +0100
+@@ -11,7 +11,7 @@
+ extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq, int irq_flags);
++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+diff -Nur linux-3.18.8.orig/include/linux/rcupdate.h linux-3.18.8/include/linux/rcupdate.h
+--- linux-3.18.8.orig/include/linux/rcupdate.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/rcupdate.h 2015-03-03 08:05:18.000000000 +0100
+@@ -147,6 +147,9 @@
+
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define call_rcu_bh call_rcu
++#else
+ /**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+@@ -170,6 +173,7 @@
+ */
+ void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
++#endif
+
+ /**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+@@ -231,6 +235,11 @@
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+ #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++#else
++static inline int sched_rcu_preempt_depth(void) { return 0; }
++#endif
+
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+@@ -254,6 +263,8 @@
+ return 0;
+ }
+
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+ /* Internal to kernel */
+@@ -430,7 +441,14 @@
+ int debug_lockdep_rcu_enabled(void);
+
+ int rcu_read_lock_held(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int rcu_read_lock_bh_held(void)
++{
++ return rcu_read_lock_held();
++}
++#else
+ int rcu_read_lock_bh_held(void);
++#endif
+
+ /**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+@@ -955,10 +973,14 @@
+ static inline void rcu_read_lock_bh(void)
+ {
+ local_bh_disable();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_lock();
++#else
+ __acquire(RCU_BH);
+ rcu_lock_acquire(&rcu_bh_lock_map);
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
++#endif
+ }
+
+ /*
+@@ -968,10 +990,14 @@
+ */
+ static inline void rcu_read_unlock_bh(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_unlock();
++#else
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
+ rcu_lock_release(&rcu_bh_lock_map);
+ __release(RCU_BH);
++#endif
+ local_bh_enable();
+ }
+
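On PREEMPT_RT_FULL the rcu_read_lock_bh() shown above degenerates to local_bh_disable() plus a normal rcu_read_lock(), and call_rcu_bh is aliased to call_rcu, so existing _bh read sides keep working unmodified. A sketch with an assumed RCU-protected pointer:

#include <linux/rcupdate.h>

struct cfg { int val; };
static struct cfg __rcu *active_cfg;	/* illustrative pointer */

static int read_cfg_val(void)
{
	struct cfg *c;
	int val = -1;

	rcu_read_lock_bh();	/* RT: local_bh_disable() + rcu_read_lock() */
	c = rcu_dereference_bh(active_cfg);
	if (c)
		val = c->val;
	rcu_read_unlock_bh();
	return val;
}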
+diff -Nur linux-3.18.8.orig/include/linux/rcutree.h linux-3.18.8/include/linux/rcutree.h
+--- linux-3.18.8.orig/include/linux/rcutree.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/rcutree.h 2015-03-03 08:05:18.000000000 +0100
+@@ -46,7 +46,11 @@
+ rcu_note_context_switch(cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define synchronize_rcu_bh synchronize_rcu
++#else
+ void synchronize_rcu_bh(void);
++#endif
+ void synchronize_sched_expedited(void);
+ void synchronize_rcu_expedited(void);
+
+@@ -74,7 +78,11 @@
+ }
+
+ void rcu_barrier(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define rcu_barrier_bh rcu_barrier
++#else
+ void rcu_barrier_bh(void);
++#endif
+ void rcu_barrier_sched(void);
+ unsigned long get_state_synchronize_rcu(void);
+ void cond_synchronize_rcu(unsigned long oldstate);
+@@ -82,12 +90,10 @@
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ long rcu_batches_completed(void);
+-long rcu_batches_completed_bh(void);
+ long rcu_batches_completed_sched(void);
+ void show_rcu_gp_kthreads(void);
+
+ void rcu_force_quiescent_state(void);
+-void rcu_bh_force_quiescent_state(void);
+ void rcu_sched_force_quiescent_state(void);
+
+ void exit_rcu(void);
+@@ -97,4 +103,12 @@
+
+ bool rcu_is_watching(void);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++void rcu_bh_force_quiescent_state(void);
++long rcu_batches_completed_bh(void);
++#else
++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
++# define rcu_batches_completed_bh rcu_batches_completed
++#endif
++
+ #endif /* __LINUX_RCUTREE_H */
+diff -Nur linux-3.18.8.orig/include/linux/rtmutex.h linux-3.18.8/include/linux/rtmutex.h
+--- linux-3.18.8.orig/include/linux/rtmutex.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/rtmutex.h 2015-03-03 08:05:18.000000000 +0100
+@@ -14,10 +14,14 @@
+
+ #include <linux/linkage.h>
+ #include <linux/rbtree.h>
+-#include <linux/spinlock_types.h>
++#include <linux/spinlock_types_raw.h>
+
+ extern int max_lock_depth; /* for sysctl */
+
++#ifdef CONFIG_DEBUG_MUTEXES
++#include <linux/debug_locks.h>
++#endif
++
+ /**
+ * The rt_mutex structure
+ *
+@@ -31,8 +35,8 @@
+ struct rb_root waiters;
+ struct rb_node *waiters_leftmost;
+ struct task_struct *owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
++#ifdef CONFIG_DEBUG_RT_MUTEXES
+ const char *name, *file;
+ int line;
+ void *magic;
+@@ -55,22 +59,33 @@
+ # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+ #endif
+
++# define rt_mutex_init(mutex) \
++ do { \
++ raw_spin_lock_init(&(mutex)->wait_lock); \
++ __rt_mutex_init(mutex, #mutex); \
++ } while (0)
++
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
+ # define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .waiters = RB_ROOT \
+ , .owner = NULL \
+- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
++
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 1 }
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+@@ -91,6 +106,7 @@
+
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock);
+ extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout);
+
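rt_mutex_init() is now one unconditional macro that initializes the wait_lock before calling __rt_mutex_init(), instead of two CONFIG_DEBUG_RT_MUTEXES-dependent variants. Typical use is unchanged; a sketch with an illustrative lock name:

#include <linux/rtmutex.h>

static struct rt_mutex my_lock;		/* assumed example */

static void setup_and_use(void)
{
	rt_mutex_init(&my_lock);	/* raw_spin_lock_init() + __rt_mutex_init() */
	rt_mutex_lock(&my_lock);
	/* ... critical section; an rt_mutex may sleep ... */
	rt_mutex_unlock(&my_lock);
}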
+diff -Nur linux-3.18.8.orig/include/linux/rwlock_rt.h linux-3.18.8/include/linux/rwlock_rt.h
+--- linux-3.18.8.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/rwlock_rt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,99 @@
++#ifndef __LINUX_RWLOCK_RT_H
++#define __LINUX_RWLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#define rwlock_init(rwl) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(rwl)->lock); \
++ __rt_rwlock_init(rwl, #rwl, &__key); \
++} while (0)
++
++extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
++
++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
++
++#define write_trylock_irqsave(lock, flags) \
++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
++
++#define read_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = rt_read_lock_irqsave(lock); \
++ } while (0)
++
++#define write_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = rt_write_lock_irqsave(lock); \
++ } while (0)
++
++#define read_lock(lock) rt_read_lock(lock)
++
++#define read_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_irq(lock) read_lock(lock)
++
++#define write_lock(lock) rt_write_lock(lock)
++
++#define write_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_irq(lock) write_lock(lock)
++
++#define read_unlock(lock) rt_read_unlock(lock)
++
++#define read_unlock_bh(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define read_unlock_irq(lock) read_unlock(lock)
++
++#define write_unlock(lock) rt_write_unlock(lock)
++
++#define write_unlock_bh(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define write_unlock_irq(lock) write_unlock(lock)
++
++#define read_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_read_unlock(lock); \
++ } while (0)
++
++#define write_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_write_unlock(lock); \
++ } while (0)
++
++#endif
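Since these RT rwlocks are rtmutex-based sleeping locks, the *_irqsave variants above no longer disable interrupts; flags is carried along only so that existing callers compile unchanged. A sketch (lock and data names are illustrative):

#include <linux/spinlock.h>	/* pulls in rwlock_rt.h on RT */

static DEFINE_RWLOCK(stats_lock);
static unsigned long stats_value;

static void update_stats(unsigned long v)
{
	unsigned long flags;

	write_lock_irqsave(&stats_lock, flags); /* RT: may sleep, IRQs stay on */
	stats_value = v;
	write_unlock_irqrestore(&stats_lock, flags);
}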
+diff -Nur linux-3.18.8.orig/include/linux/rwlock_types.h linux-3.18.8/include/linux/rwlock_types.h
+--- linux-3.18.8.orig/include/linux/rwlock_types.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/rwlock_types.h 2015-03-03 08:05:18.000000000 +0100
+@@ -1,6 +1,10 @@
+ #ifndef __LINUX_RWLOCK_TYPES_H
+ #define __LINUX_RWLOCK_TYPES_H
+
++#if !defined(__LINUX_SPINLOCK_TYPES_H)
++# error "Do not include directly, include spinlock_types.h"
++#endif
++
+ /*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+@@ -43,6 +47,7 @@
+ RW_DEP_MAP_INIT(lockname) }
+ #endif
+
+-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+ #endif /* __LINUX_RWLOCK_TYPES_H */
+diff -Nur linux-3.18.8.orig/include/linux/rwlock_types_rt.h linux-3.18.8/include/linux/rwlock_types_rt.h
+--- linux-3.18.8.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/rwlock_types_rt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_RWLOCK_TYPES_RT_H
++#define __LINUX_RWLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * rwlocks - rtmutex which allows single reader recursion
++ */
++typedef struct {
++ struct rt_mutex lock;
++ int read_depth;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} rwlock_t;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++#define __RW_LOCK_UNLOCKED(name) \
++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
++
++#endif
+diff -Nur linux-3.18.8.orig/include/linux/rwsem.h linux-3.18.8/include/linux/rwsem.h
+--- linux-3.18.8.orig/include/linux/rwsem.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/rwsem.h 2015-03-03 08:05:18.000000000 +0100
+@@ -18,6 +18,10 @@
+ #include <linux/osq_lock.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ struct rw_semaphore;
+
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+@@ -177,4 +181,6 @@
+ # define up_read_non_owner(sem) up_read(sem)
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
+diff -Nur linux-3.18.8.orig/include/linux/rwsem_rt.h linux-3.18.8/include/linux/rwsem_rt.h
+--- linux-3.18.8.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/rwsem_rt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,133 @@
++#ifndef _LINUX_RWSEM_RT_H
++#define _LINUX_RWSEM_RT_H
++
++#ifndef _LINUX_RWSEM_H
++#error "Include rwsem.h"
++#endif
++
++/*
++ * RW-semaphores are a spinlock plus a reader-depth count.
++ *
++ * Note that the semantics are different from the usual
++ * Linux rw-sems: in PREEMPT_RT mode we do not allow
++ * multiple readers to hold the lock at once; we only allow
++ * a read-lock owner to read-lock recursively. This is
++ * better for latency, makes the implementation inherently
++ * fair and makes it simpler as well.
++ */
++
++#include <linux/rtmutex.h>
++
++struct rw_semaphore {
++ struct rt_mutex lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __RWSEM_INITIALIZER(name) \
++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(lockname) \
++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
++
++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
++ struct lock_class_key *key);
++
++#define __rt_init_rwsem(sem, name, key) \
++ do { \
++ rt_mutex_init(&(sem)->lock); \
++ __rt_rwsem_init((sem), (name), (key));\
++ } while (0)
++
++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
++
++# define rt_init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __rt_init_rwsem((sem), #sem, &__key); \
++} while (0)
++
++extern void rt_down_write(struct rw_semaphore *rwsem);
++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
++ struct lockdep_map *nest);
++extern void rt_down_read(struct rw_semaphore *rwsem);
++extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
++extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
++extern void rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_write(struct rw_semaphore *rwsem);
++extern void rt_downgrade_write(struct rw_semaphore *rwsem);
++
++#define init_rwsem(sem) rt_init_rwsem(sem)
++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
++
++static inline int rwsem_is_contended(struct rw_semaphore *sem)
++{
++ /* rt_mutex_has_waiters() */
++ return !RB_EMPTY_ROOT(&sem->lock.waiters);
++}
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++ rt_down_read(sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_read_trylock(sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++ rt_down_write(sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_write_trylock(sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++ rt_up_read(sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++ rt_up_write(sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++ rt_downgrade_write(sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++ return rt_down_read_nested(sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++ rt_down_write_nested(sem, subclass);
++}
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static inline void down_write_nest_lock(struct rw_semaphore *sem,
++ struct rw_semaphore *nest_lock)
++{
++ rt_down_write_nested_lock(sem, &nest_lock->dep_map);
++}
++
++#else
++
++static inline void down_write_nest_lock(struct rw_semaphore *sem,
++ struct rw_semaphore *nest_lock)
++{
++ rt_down_write_nested_lock(sem, NULL);
++}
++#endif
++#endif
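Because the RT rw-semaphore admits only one reader (recursively), readers serialize against each other, but the API keeps its usual shape. A sketch with illustrative names:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_sem);
static int cfg_data;

static int read_cfg(void)
{
	int v;

	down_read(&cfg_sem);	/* rt_down_read(): one reader at a time on RT */
	v = cfg_data;
	up_read(&cfg_sem);
	return v;
}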
+diff -Nur linux-3.18.8.orig/include/linux/sched.h linux-3.18.8/include/linux/sched.h
+--- linux-3.18.8.orig/include/linux/sched.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/sched.h 2015-03-03 08:05:18.000000000 +0100
+@@ -26,6 +26,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
+ #include <linux/preempt_mask.h>
++#include <asm/kmap_types.h>
+
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+@@ -56,6 +57,7 @@
+ #include <linux/cred.h>
+ #include <linux/llist.h>
+ #include <linux/uidgid.h>
++#include <linux/hardirq.h>
+ #include <linux/gfp.h>
+ #include <linux/magic.h>
+
+@@ -235,10 +237,7 @@
+ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+
+-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+ #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+-#define task_is_stopped_or_traced(task) \
+- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+ #define task_contributes_to_load(task) \
+ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+ (task->flags & PF_FROZEN) == 0)
+@@ -1234,6 +1233,7 @@
+
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ volatile long saved_state; /* saved state for "spinlock sleepers" */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+@@ -1270,6 +1270,12 @@
+ #endif
+
+ unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int migrate_disable;
++# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++# endif
++#endif
+ int nr_cpus_allowed;
+ cpumask_t cpus_allowed;
+
+@@ -1371,7 +1377,8 @@
+ struct cputime prev_cputime;
+ #endif
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+- seqlock_t vtime_seqlock;
++ raw_spinlock_t vtime_lock;
++ seqcount_t vtime_seq;
+ unsigned long long vtime_snap;
+ enum {
+ VTIME_SLEEPING = 0,
+@@ -1387,6 +1394,9 @@
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *posix_timer_list;
++#endif
+
+ /* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+@@ -1419,10 +1429,15 @@
+ /* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
++ struct sigqueue *sigqueue_cache;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+ struct sigpending pending;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* TODO: move me into ->restart_block ? */
++ struct siginfo forced_info;
++#endif
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+@@ -1460,6 +1475,9 @@
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int pagefault_disabled;
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+@@ -1644,6 +1662,12 @@
+ unsigned long trace;
+ /* bitmask and counter of trace recursion */
+ unsigned long trace_recursion;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ u64 preempt_timestamp_hist;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ long timer_offset;
++#endif
++#endif
+ #endif /* CONFIG_TRACING */
+ #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
+ unsigned int memcg_kmem_skip_account;
+@@ -1661,11 +1685,19 @@
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++ int softirq_nestcnt;
++ unsigned int softirqs_raised;
++#endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
++# endif
++#endif
+ };
+
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+-
+ #define TNF_MIGRATED 0x01
+ #define TNF_NO_GROUP 0x02
+ #define TNF_SHARED 0x04
+@@ -1700,6 +1732,17 @@
+ }
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
++#else
++static inline bool cur_pf_disabled(void) { return false; }
++#endif
++
++static inline bool pagefault_disabled(void)
++{
++ return in_atomic() || cur_pf_disabled();
++}
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ return task->pids[PIDTYPE_PID].pid;
+@@ -1853,6 +1896,15 @@
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -1860,6 +1912,7 @@
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
++#endif
+
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ extern void task_cputime(struct task_struct *t,
+@@ -1898,6 +1951,7 @@
+ /*
+ * Per process flags
+ */
++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
+ #define PF_EXITING 0x00000004 /* getting shut down */
+ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
+ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+@@ -2058,6 +2112,10 @@
+
+ extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask);
++int migrate_me(void);
++void tell_sched_cpu_down_begin(int cpu);
++void tell_sched_cpu_down_done(int cpu);
++
+ #else
+ static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+@@ -2070,6 +2128,9 @@
+ return -EINVAL;
+ return 0;
+ }
++static inline int migrate_me(void) { return 0; }
++static inline void tell_sched_cpu_down_begin(int cpu) { }
++static inline void tell_sched_cpu_down_done(int cpu) { }
+ #endif
+
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -2290,6 +2351,7 @@
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+@@ -2406,12 +2468,24 @@
+
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+@@ -2719,6 +2793,43 @@
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
++}
++
++static inline int need_resched_lazy(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++#else
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
++static inline int need_resched_lazy(void) { return 0; }
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++#endif
++
+ static inline int restart_syscall(void)
+ {
+ set_tsk_thread_flag(current, TIF_SIGPENDING);
+@@ -2750,6 +2861,51 @@
+ return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+ }
+
++static inline bool __task_is_stopped_or_traced(struct task_struct *task)
++{
++ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#endif
++ return false;
++}
++
++static inline bool task_is_stopped_or_traced(struct task_struct *task)
++{
++ bool traced_stopped;
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ traced_stopped = __task_is_stopped_or_traced(task);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++#else
++ traced_stopped = __task_is_stopped_or_traced(task);
++#endif
++ return traced_stopped;
++}
++
++static inline bool task_is_traced(struct task_struct *task)
++{
++ bool traced = false;
++
++ if (task->state & __TASK_TRACED)
++ return true;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* in case the task is sleeping on tasklist_lock */
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->state & __TASK_TRACED)
++ traced = true;
++ else if (task->saved_state & __TASK_TRACED)
++ traced = true;
++ raw_spin_unlock_irq(&task->pi_lock);
++#endif
++ return traced;
++}
++
+ /*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+@@ -2766,7 +2922,7 @@
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT_COUNT
++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET 0
+@@ -2777,12 +2933,16 @@
+ __cond_resched_lock(lock); \
+ })
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
+
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
+
+ static inline void cond_resched_rcu(void)
+ {
+@@ -2949,6 +3109,26 @@
+
+ #endif /* CONFIG_SMP */
+
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
++ return 0;
++#endif
++}
++
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (p->migrate_disable)
++ return cpumask_of(task_cpu(p));
++#endif
++
++ return &p->cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+diff -Nur linux-3.18.8.orig/include/linux/seqlock.h linux-3.18.8/include/linux/seqlock.h
+--- linux-3.18.8.orig/include/linux/seqlock.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/seqlock.h 2015-03-03 08:05:18.000000000 +0100
+@@ -219,20 +219,30 @@
+ return __read_seqcount_retry(s, start);
+ }
+
+-
+-
+-static inline void raw_write_seqcount_begin(seqcount_t *s)
++static inline void __raw_write_seqcount_begin(seqcount_t *s)
+ {
+ s->sequence++;
+ smp_wmb();
+ }
+
+-static inline void raw_write_seqcount_end(seqcount_t *s)
++static inline void raw_write_seqcount_begin(seqcount_t *s)
++{
++ preempt_disable_rt();
++ __raw_write_seqcount_begin(s);
++}
++
++static inline void __raw_write_seqcount_end(seqcount_t *s)
+ {
+ smp_wmb();
+ s->sequence++;
+ }
+
++static inline void raw_write_seqcount_end(seqcount_t *s)
++{
++ __raw_write_seqcount_end(s);
++ preempt_enable_rt();
++}
++
+ /*
+ * raw_write_seqcount_latch - redirect readers to even/odd copy
+ * @s: pointer to seqcount_t
+@@ -305,10 +315,32 @@
+ /*
+ * Read side functions for starting and finalizing a read side section.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+ return read_seqcount_begin(&sl->seqcount);
+ }
++#else
++/*
++ * Starvation safe read side for RT
++ */
++static inline unsigned read_seqbegin(seqlock_t *sl)
++{
++ unsigned ret;
++
++repeat:
++ ret = ACCESS_ONCE(sl->seqcount.sequence);
++ if (unlikely(ret & 1)) {
++ /*
++	 * Take the lock and let the writer proceed (i.e. possibly
++ * boost it), otherwise we could loop here forever.
++ */
++ spin_unlock_wait(&sl->lock);
++ goto repeat;
++ }
++ return ret;
++}
++#endif
+
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
+@@ -323,36 +355,36 @@
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+ spin_lock(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock(&sl->lock);
+ }
+
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+ spin_lock_bh(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock_bh(&sl->lock);
+ }
+
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+ spin_lock_irq(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock_irq(&sl->lock);
+ }
+
+@@ -361,7 +393,7 @@
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+- write_seqcount_begin(&sl->seqcount);
++ __raw_write_seqcount_begin(&sl->seqcount);
+ return flags;
+ }
+
+@@ -371,7 +403,7 @@
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __raw_write_seqcount_end(&sl->seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+ }
+
+diff -Nur linux-3.18.8.orig/include/linux/signal.h linux-3.18.8/include/linux/signal.h
+--- linux-3.18.8.orig/include/linux/signal.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/signal.h 2015-03-03 08:05:18.000000000 +0100
+@@ -218,6 +218,7 @@
+ }
+
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
+
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+diff -Nur linux-3.18.8.orig/include/linux/skbuff.h linux-3.18.8/include/linux/skbuff.h
+--- linux-3.18.8.orig/include/linux/skbuff.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/skbuff.h 2015-03-03 08:05:18.000000000 +0100
+@@ -172,6 +172,7 @@
+
+ __u32 qlen;
+ spinlock_t lock;
++ raw_spinlock_t raw_lock;
+ };
+
+ struct sk_buff;
+@@ -1327,6 +1328,12 @@
+ __skb_queue_head_init(list);
+ }
+
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++ raw_spin_lock_init(&list->raw_lock);
++ __skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+ {
+diff -Nur linux-3.18.8.orig/include/linux/smp.h linux-3.18.8/include/linux/smp.h
+--- linux-3.18.8.orig/include/linux/smp.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/smp.h 2015-03-03 08:05:18.000000000 +0100
+@@ -178,6 +178,9 @@
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
++
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
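get_cpu_light()/put_cpu_light() give a CPU number that stays meaningful without disabling preemption, mirroring get_cpu()/put_cpu(). A sketch (the function name is illustrative):

#include <linux/smp.h>
#include <linux/printk.h>

static void poke_local_cpu(void)
{
	int cpu = get_cpu_light();	/* migrate_disable() + smp_processor_id() */

	/* 'cpu' remains our CPU even though we stay preemptible on RT */
	pr_debug("running on cpu %d\n", cpu);
	put_cpu_light();
}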
+diff -Nur linux-3.18.8.orig/include/linux/spinlock_api_smp.h linux-3.18.8/include/linux/spinlock_api_smp.h
+--- linux-3.18.8.orig/include/linux/spinlock_api_smp.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock_api_smp.h 2015-03-03 08:05:18.000000000 +0100
+@@ -187,6 +187,8 @@
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff -Nur linux-3.18.8.orig/include/linux/spinlock.h linux-3.18.8/include/linux/spinlock.h
+--- linux-3.18.8.orig/include/linux/spinlock.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock.h 2015-03-03 08:05:18.000000000 +0100
+@@ -278,7 +278,11 @@
+ #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -289,6 +293,10 @@
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -418,4 +426,6 @@
+ #define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* __LINUX_SPINLOCK_H */
+diff -Nur linux-3.18.8.orig/include/linux/spinlock_rt.h linux-3.18.8/include/linux/spinlock_rt.h
+--- linux-3.18.8.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock_rt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,167 @@
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#include <linux/bug.h>
++
++extern void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++
++#define spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key); \
++} while (0)
++
++extern void __lockfunc rt_spin_lock(spinlock_t *lock);
++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
++extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
++
++/*
++ * lockdep-less calls, for derived types like rwlock:
++ * (for trylock they can use rt_mutex_trylock() directly).
++ */
++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
++extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
++
++#define spin_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_irq(lock) spin_lock(lock)
++
++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#define spin_trylock(lock) \
++({ \
++ int __locked; \
++ migrate_disable(); \
++ __locked = spin_do_trylock(lock); \
++ if (!__locked) \
++ migrate_enable(); \
++ __locked; \
++})
++
++#ifdef CONFIG_LOCKDEP
++# define spin_lock_nested(lock, subclass) \
++ do { \
++ migrate_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ migrate_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++#else
++# define spin_lock_nested(lock, subclass) spin_lock(lock)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++#endif
++
++#define spin_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++
++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
++{
++ unsigned long flags = 0;
++#ifdef CONFIG_TRACE_IRQFLAGS
++ flags = rt_spin_lock_trace_flags(lock);
++#else
++ spin_lock(lock); /* lock_local */
++#endif
++ return flags;
++}
++
++/* FIXME: we need rt_spin_lock_nest_lock */
++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
++
++#define spin_unlock(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define spin_unlock_bh(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define spin_unlock_irq(lock) spin_unlock(lock)
++
++#define spin_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ spin_unlock(lock); \
++ } while (0)
++
++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
++#define spin_trylock_irq(lock) spin_trylock(lock)
++
++#define spin_trylock_irqsave(lock, flags) \
++ rt_spin_trylock_irqsave(lock, &(flags))
++
++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
++
++#ifdef CONFIG_GENERIC_LOCKBREAK
++# define spin_is_contended(lock) ((lock)->break_lock)
++#else
++# define spin_is_contended(lock) (((void)(lock), 0))
++#endif
++
++static inline int spin_can_lock(spinlock_t *lock)
++{
++ return !rt_mutex_is_locked(&lock->lock);
++}
++
++static inline int spin_is_locked(spinlock_t *lock)
++{
++ return rt_mutex_is_locked(&lock->lock);
++}
++
++static inline void assert_spin_locked(spinlock_t *lock)
++{
++ BUG_ON(!spin_is_locked(lock));
++}
++
++#define atomic_dec_and_lock(atomic, lock) \
++ atomic_dec_and_spin_lock(atomic, lock)
++
++#endif
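The central behavioral change above: on RT, spin_lock_irqsave() neither disables interrupts nor saves flags (flags is forced to 0); the lock is a sleeping rtmutex taken under migrate_disable(). Callers keep the same shape, e.g. (illustrative names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);
static int dev_state;

static void set_state(int s)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_lock, flags);	/* RT: may sleep, IRQs stay on */
	dev_state = s;
	spin_unlock_irqrestore(&dev_lock, flags);
}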
+diff -Nur linux-3.18.8.orig/include/linux/spinlock_types.h linux-3.18.8/include/linux/spinlock_types.h
+--- linux-3.18.8.orig/include/linux/spinlock_types.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock_types.h 2015-03-03 08:05:18.000000000 +0100
+@@ -9,80 +9,15 @@
+ * Released under the General Public License (GPL).
+ */
+
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
+-#else
+-# include <linux/spinlock_types_up.h>
+-#endif
+-
+-#include <linux/lockdep.h>
+-
+-typedef struct raw_spinlock {
+- arch_spinlock_t raw_lock;
+-#ifdef CONFIG_GENERIC_LOCKBREAK
+- unsigned int break_lock;
+-#endif
+-#ifdef CONFIG_DEBUG_SPINLOCK
+- unsigned int magic, owner_cpu;
+- void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+-#else
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
++#include <linux/spinlock_types_raw.h>
+
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
+ #else
+-# define SPIN_DEBUG_INIT(lockname)
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
+ #endif
+
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+- union {
+- struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+- struct {
+- u8 __padding[LOCK_PADSIZE];
+- struct lockdep_map dep_map;
+- };
+-#endif
+- };
+-} spinlock_t;
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+-
+-#include <linux/rwlock_types.h>
+-
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_nort.h linux-3.18.8/include/linux/spinlock_types_nort.h
+--- linux-3.18.8.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock_types_nort.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * The non-RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++ union {
++ struct raw_spinlock rlock;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++ struct {
++ u8 __padding[LOCK_PADSIZE];
++ struct lockdep_map dep_map;
++ };
++#endif
++ };
++} spinlock_t;
++
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
++
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++
++#endif
+diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_raw.h linux-3.18.8/include/linux/spinlock_types_raw.h
+--- linux-3.18.8.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock_types_raw.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,56 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
++
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
++
++#include <linux/lockdep.h>
++
++typedef struct raw_spinlock {
++ arch_spinlock_t raw_lock;
++#ifdef CONFIG_GENERIC_LOCKBREAK
++ unsigned int break_lock;
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ unsigned int magic, owner_cpu;
++ void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
++
++#define SPINLOCK_MAGIC 0xdead4ead
++
++#define SPINLOCK_OWNER_INIT ((void *)-1L)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname) \
++ .magic = SPINLOCK_MAGIC, \
++ .owner_cpu = -1, \
++ .owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
++
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
++ { \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ SPIN_DEP_MAP_INIT(lockname) }
++
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
++
++#endif
+diff -Nur linux-3.18.8.orig/include/linux/spinlock_types_rt.h linux-3.18.8/include/linux/spinlock_types_rt.h
+--- linux-3.18.8.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/spinlock_types_rt.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,51 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++#include <linux/cache.h>
++
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++ struct rt_mutex lock;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} spinlock_t;
++
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ .file = __FILE__, \
++ .line = __LINE__ , \
++ }
++#else
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ }
++#endif
++
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
++
++#define __SPIN_LOCK_UNLOCKED(name) \
++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
++ SPIN_DEP_MAP_INIT(name) }
++
++#define __DEFINE_SPINLOCK(name) \
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
++
++#define DEFINE_SPINLOCK(name) \
++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
++
++#endif
+diff -Nur linux-3.18.8.orig/include/linux/srcu.h linux-3.18.8/include/linux/srcu.h
+--- linux-3.18.8.orig/include/linux/srcu.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/srcu.h 2015-03-03 08:05:18.000000000 +0100
+@@ -84,10 +84,10 @@
+
+ void process_srcu(struct work_struct *work);
+
+-#define __SRCU_STRUCT_INIT(name) \
++#define __SRCU_STRUCT_INIT(name, pcpu_name) \
+ { \
+ .completed = -300, \
+- .per_cpu_ref = &name##_srcu_array, \
++ .per_cpu_ref = &pcpu_name, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+@@ -104,11 +104,12 @@
+ */
+ #define DEFINE_SRCU(name) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+- struct srcu_struct name = __SRCU_STRUCT_INIT(name);
++ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
+
+ #define DEFINE_STATIC_SRCU(name) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+- static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
++ static struct srcu_struct name = __SRCU_STRUCT_INIT(\
++ name, name##_srcu_array);
+
+ /**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
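Passing the per-CPU array name into __SRCU_STRUCT_INIT lets DEFINE_STATIC_SRCU keep its array static without relying on name pasting inside the initializer; users are unaffected. A sketch (the SRCU domain name is illustrative):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

static void reader(void)
{
	int idx = srcu_read_lock(&my_srcu);
	/* ... SRCU read-side critical section ... */
	srcu_read_unlock(&my_srcu, idx);
}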
+diff -Nur linux-3.18.8.orig/include/linux/swap.h linux-3.18.8/include/linux/swap.h
+--- linux-3.18.8.orig/include/linux/swap.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/swap.h 2015-03-03 08:05:18.000000000 +0100
+@@ -11,6 +11,7 @@
+ #include <linux/fs.h>
+ #include <linux/atomic.h>
+ #include <linux/page-flags.h>
++#include <linux/locallock.h>
+ #include <asm/page.h>
+
+ struct notifier_block;
+@@ -260,7 +261,8 @@
+ void *workingset_eviction(struct address_space *mapping, struct page *page);
+ bool workingset_refault(void *shadow);
+ void workingset_activation(struct page *page);
+-extern struct list_lru workingset_shadow_nodes;
++extern struct list_lru __workingset_shadow_nodes;
++DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+
+ static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
+ {
+diff -Nur linux-3.18.8.orig/include/linux/sysctl.h linux-3.18.8/include/linux/sysctl.h
+--- linux-3.18.8.orig/include/linux/sysctl.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/sysctl.h 2015-03-03 08:05:18.000000000 +0100
+@@ -25,6 +25,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/wait.h>
+ #include <linux/rbtree.h>
++#include <linux/atomic.h>
+ #include <uapi/linux/sysctl.h>
+
+ /* For the /proc/sys support */
+diff -Nur linux-3.18.8.orig/include/linux/thread_info.h linux-3.18.8/include/linux/thread_info.h
+--- linux-3.18.8.orig/include/linux/thread_info.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/thread_info.h 2015-03-03 08:05:18.000000000 +0100
+@@ -102,7 +102,17 @@
+ #define test_thread_flag(flag) \
+ test_ti_thread_flag(current_thread_info(), flag)
+
+-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++#ifdef CONFIG_PREEMPT_LAZY
++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
++ test_thread_flag(TIF_NEED_RESCHED_LAZY))
++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
++#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)
++
++#else
++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
++#define tif_need_resched_lazy() 0
++#endif
+
+ #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
+ /*
+diff -Nur linux-3.18.8.orig/include/linux/timer.h linux-3.18.8/include/linux/timer.h
+--- linux-3.18.8.orig/include/linux/timer.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/timer.h 2015-03-03 08:05:18.000000000 +0100
+@@ -241,7 +241,7 @@
+
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ extern int del_timer_sync(struct timer_list *timer);
+ #else
+ # define del_timer_sync(t) del_timer(t)
+diff -Nur linux-3.18.8.orig/include/linux/uaccess.h linux-3.18.8/include/linux/uaccess.h
+--- linux-3.18.8.orig/include/linux/uaccess.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/uaccess.h 2015-03-03 08:05:18.000000000 +0100
+@@ -6,14 +6,9 @@
+
+ /*
+ * These routines enable/disable the pagefault handler in that
+- * it will not take any locks and go straight to the fixup table.
+- *
+- * They have great resemblance to the preempt_disable/enable calls
+- * and in fact they are identical; this is because currently there is
+- * no other way to make the pagefault handlers do this. So we do
+- * disable preemption but we don't necessarily care about that.
++ * it will not take any MM locks and go straight to the fixup table.
+ */
+-static inline void pagefault_disable(void)
++static inline void raw_pagefault_disable(void)
+ {
+ preempt_count_inc();
+ /*
+@@ -23,7 +18,7 @@
+ barrier();
+ }
+
+-static inline void pagefault_enable(void)
++static inline void raw_pagefault_enable(void)
+ {
+ #ifndef CONFIG_PREEMPT
+ /*
+@@ -37,6 +32,21 @@
+ #endif
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline void pagefault_disable(void)
++{
++ raw_pagefault_disable();
++}
++
++static inline void pagefault_enable(void)
++{
++ raw_pagefault_enable();
++}
++#else
++extern void pagefault_disable(void);
++extern void pagefault_enable(void);
++#endif
++
+ #ifndef ARCH_HAS_NOCACHE_UACCESS
+
+ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+@@ -76,9 +86,9 @@
+ mm_segment_t old_fs = get_fs(); \
+ \
+ set_fs(KERNEL_DS); \
+- pagefault_disable(); \
++ raw_pagefault_disable(); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
++ raw_pagefault_enable(); \
+ set_fs(old_fs); \
+ ret; \
+ })
+diff -Nur linux-3.18.8.orig/include/linux/uprobes.h linux-3.18.8/include/linux/uprobes.h
+--- linux-3.18.8.orig/include/linux/uprobes.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/uprobes.h 2015-03-03 08:05:18.000000000 +0100
+@@ -27,6 +27,7 @@
+ #include <linux/errno.h>
+ #include <linux/rbtree.h>
+ #include <linux/types.h>
++#include <linux/wait.h>
+
+ struct vm_area_struct;
+ struct mm_struct;
+diff -Nur linux-3.18.8.orig/include/linux/vmstat.h linux-3.18.8/include/linux/vmstat.h
+--- linux-3.18.8.orig/include/linux/vmstat.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/vmstat.h 2015-03-03 08:05:18.000000000 +0100
+@@ -33,7 +33,9 @@
+ */
+ static inline void __count_vm_event(enum vm_event_item item)
+ {
++ preempt_disable_rt();
+ raw_cpu_inc(vm_event_states.event[item]);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_event(enum vm_event_item item)
+@@ -43,7 +45,9 @@
+
+ static inline void __count_vm_events(enum vm_event_item item, long delta)
+ {
++ preempt_disable_rt();
+ raw_cpu_add(vm_event_states.event[item], delta);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_events(enum vm_event_item item, long delta)
+diff -Nur linux-3.18.8.orig/include/linux/wait.h linux-3.18.8/include/linux/wait.h
+--- linux-3.18.8.orig/include/linux/wait.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/linux/wait.h 2015-03-03 08:05:18.000000000 +0100
+@@ -8,6 +8,7 @@
+ #include <linux/spinlock.h>
+ #include <asm/current.h>
+ #include <uapi/linux/wait.h>
++#include <linux/atomic.h>
+
+ typedef struct __wait_queue wait_queue_t;
+ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+diff -Nur linux-3.18.8.orig/include/linux/wait-simple.h linux-3.18.8/include/linux/wait-simple.h
+--- linux-3.18.8.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/wait-simple.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,207 @@
++#ifndef _LINUX_WAIT_SIMPLE_H
++#define _LINUX_WAIT_SIMPLE_H
++
++#include <linux/spinlock.h>
++#include <linux/list.h>
++
++#include <asm/current.h>
++
++struct swaiter {
++ struct task_struct *task;
++ struct list_head node;
++};
++
++#define DEFINE_SWAITER(name) \
++ struct swaiter name = { \
++ .task = current, \
++ .node = LIST_HEAD_INIT((name).node), \
++ }
++
++struct swait_head {
++ raw_spinlock_t lock;
++ struct list_head list;
++};
++
++#define SWAIT_HEAD_INITIALIZER(name) { \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
++ .list = LIST_HEAD_INIT((name).list), \
++ }
++
++#define DEFINE_SWAIT_HEAD(name) \
++ struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
++
++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
++
++#define init_swait_head(swh) \
++ do { \
++ static struct lock_class_key __key; \
++ \
++ __init_swait_head((swh), &__key); \
++ } while (0)
++
++/*
++ * Waiter functions
++ */
++extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
++extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
++extern void swait_finish(struct swait_head *head, struct swaiter *w);
++
++/* Check whether a head has waiters enqueued */
++static inline bool swaitqueue_active(struct swait_head *h)
++{
++ /* Make sure the condition is visible before checking list_empty() */
++ smp_mb();
++ return !list_empty(&h->list);
++}
++
++/*
++ * Wakeup functions
++ */
++extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
++extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
++
++#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1)
++#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1)
++#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0)
++#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0)
++
++/*
++ * Event API
++ */
++#define __swait_event(wq, condition) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ schedule(); \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++/**
++ * swait_event - sleep until a condition gets true
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ *
++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ */
++#define swait_event(wq, condition) \
++do { \
++ if (condition) \
++ break; \
++ __swait_event(wq, condition); \
++} while (0)
++
++#define __swait_event_interruptible(wq, condition, ret) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ schedule(); \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++#define __swait_event_interruptible_timeout(wq, condition, ret) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ if (signal_pending(current)) { \
++ ret = -ERESTARTSYS; \
++ break; \
++ } \
++ ret = schedule_timeout(ret); \
++ if (!ret) \
++ break; \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++/**
++ * swait_event_interruptible - sleep until a condition gets true
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ *
++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ */
++#define swait_event_interruptible(wq, condition) \
++({ \
++ int __ret = 0; \
++ if (!(condition)) \
++ __swait_event_interruptible(wq, condition, __ret); \
++ __ret; \
++})
++
++#define swait_event_interruptible_timeout(wq, condition, timeout) \
++({ \
++ int __ret = timeout; \
++ if (!(condition)) \
++ __swait_event_interruptible_timeout(wq, condition, __ret); \
++ __ret; \
++})
++
++#define __swait_event_timeout(wq, condition, ret) \
++do { \
++ DEFINE_SWAITER(__wait); \
++ \
++ for (;;) { \
++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
++ if (condition) \
++ break; \
++ ret = schedule_timeout(ret); \
++ if (!ret) \
++ break; \
++ } \
++ swait_finish(&wq, &__wait); \
++} while (0)
++
++/**
++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
++ * @wq: the waitqueue to wait on
++ * @condition: a C expression for the event to wait for
++ * @timeout: timeout, in jiffies
++ *
++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
++ * @condition evaluates to true. The @condition is checked each time
++ * the waitqueue @wq is woken up.
++ *
++ * wake_up() has to be called after changing any variable that could
++ * change the result of the wait condition.
++ *
++ * The function returns 0 if the @timeout elapsed, and the remaining
++ * jiffies if the condition evaluated to true before the timeout elapsed.
++ */
++#define swait_event_timeout(wq, condition, timeout) \
++({ \
++ long __ret = timeout; \
++ if (!(condition)) \
++ __swait_event_timeout(wq, condition, __ret); \
++ __ret; \
++})
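++
++/*
++ * Illustrative usage (hypothetical names):
++ *
++ *	static DEFINE_SWAIT_HEAD(my_wait);
++ *	static bool my_cond;
++ *
++ *	waiter:	swait_event_interruptible(my_wait, my_cond);
++ *
++ *	waker:	my_cond = true;
++ *		swait_wake(&my_wait);
++ */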
++
++#endif
+diff -Nur linux-3.18.8.orig/include/linux/work-simple.h linux-3.18.8/include/linux/work-simple.h
+--- linux-3.18.8.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/linux/work-simple.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,24 @@
++#ifndef _LINUX_SWORK_H
++#define _LINUX_SWORK_H
++
++#include <linux/list.h>
++
++struct swork_event {
++ struct list_head item;
++ unsigned long flags;
++ void (*func)(struct swork_event *);
++};
++
++static inline void INIT_SWORK(struct swork_event *event,
++ void (*func)(struct swork_event *))
++{
++ event->flags = 0;
++ event->func = func;
++}
++
++bool swork_queue(struct swork_event *sev);
++
++int swork_get(void);
++void swork_put(void);
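++
++/*
++ * Illustrative usage (hypothetical names): take a reference on the worker
++ * with swork_get(), queue events, and drop it with swork_put() when done:
++ *
++ *	static void my_func(struct swork_event *sev) { ... }
++ *	static struct swork_event my_ev;
++ *
++ *	swork_get();
++ *	INIT_SWORK(&my_ev, my_func);
++ *	swork_queue(&my_ev);
++ *	swork_put();
++ */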
++
++#endif /* _LINUX_SWORK_H */
+diff -Nur linux-3.18.8.orig/include/net/dst.h linux-3.18.8/include/net/dst.h
+--- linux-3.18.8.orig/include/net/dst.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/net/dst.h 2015-03-03 08:05:18.000000000 +0100
+@@ -403,7 +403,7 @@
+ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
+ struct sk_buff *skb)
+ {
+- const struct hh_cache *hh;
++ struct hh_cache *hh;
+
+ if (dst->pending_confirm) {
+ unsigned long now = jiffies;
+diff -Nur linux-3.18.8.orig/include/net/neighbour.h linux-3.18.8/include/net/neighbour.h
+--- linux-3.18.8.orig/include/net/neighbour.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/net/neighbour.h 2015-03-03 08:05:18.000000000 +0100
+@@ -387,7 +387,7 @@
+ }
+ #endif
+
+-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
+ {
+ unsigned int seq;
+ int hh_len;
+@@ -442,7 +442,7 @@
+
+ #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
+
+-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
+ const struct net_device *dev)
+ {
+ unsigned int seq;
+diff -Nur linux-3.18.8.orig/include/net/netns/ipv4.h linux-3.18.8/include/net/netns/ipv4.h
+--- linux-3.18.8.orig/include/net/netns/ipv4.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/include/net/netns/ipv4.h 2015-03-03 08:05:18.000000000 +0100
+@@ -67,6 +67,7 @@
+
+ int sysctl_icmp_echo_ignore_all;
+ int sysctl_icmp_echo_ignore_broadcasts;
++ int sysctl_icmp_echo_sysrq;
+ int sysctl_icmp_ignore_bogus_error_responses;
+ int sysctl_icmp_ratelimit;
+ int sysctl_icmp_ratemask;
+diff -Nur linux-3.18.8.orig/include/trace/events/hist.h linux-3.18.8/include/trace/events/hist.h
+--- linux-3.18.8.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/trace/events/hist.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,72 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM hist
++
++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_HIST_H
++
++#include "latency_hist.h"
++#include <linux/tracepoint.h>
++
++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
++#define trace_preemptirqsoff_hist(a, b)
++#else
++TRACE_EVENT(preemptirqsoff_hist,
++
++ TP_PROTO(int reason, int starthist),
++
++ TP_ARGS(reason, starthist),
++
++ TP_STRUCT__entry(
++ __field(int, reason)
++ __field(int, starthist)
++ ),
++
++ TP_fast_assign(
++ __entry->reason = reason;
++ __entry->starthist = starthist;
++ ),
++
++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
++ __entry->starthist ? "start" : "stop")
++);
++#endif
++
++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
++#define trace_hrtimer_interrupt(a, b, c, d)
++#else
++TRACE_EVENT(hrtimer_interrupt,
++
++ TP_PROTO(int cpu, long long offset, struct task_struct *curr,
++ struct task_struct *task),
++
++ TP_ARGS(cpu, offset, curr, task),
++
++ TP_STRUCT__entry(
++ __field(int, cpu)
++ __field(long long, offset)
++ __array(char, ccomm, TASK_COMM_LEN)
++ __field(int, cprio)
++ __array(char, tcomm, TASK_COMM_LEN)
++ __field(int, tprio)
++ ),
++
++ TP_fast_assign(
++ __entry->cpu = cpu;
++ __entry->offset = offset;
++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
++ __entry->cprio = curr->prio;
++ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
++ task != NULL ? TASK_COMM_LEN : 7);
++ __entry->tprio = task != NULL ? task->prio : -1;
++ ),
++
++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
++ __entry->cpu, __entry->offset, __entry->ccomm,
++ __entry->cprio, __entry->tcomm, __entry->tprio)
++);
++#endif
++
++#endif /* _TRACE_HIST_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff -Nur linux-3.18.8.orig/include/trace/events/latency_hist.h linux-3.18.8/include/trace/events/latency_hist.h
+--- linux-3.18.8.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/include/trace/events/latency_hist.h 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,29 @@
++#ifndef _LATENCY_HIST_H
++#define _LATENCY_HIST_H
++
++enum hist_action {
++ IRQS_ON,
++ PREEMPT_ON,
++ TRACE_STOP,
++ IRQS_OFF,
++ PREEMPT_OFF,
++ TRACE_START,
++};
++
++static char *actions[] = {
++ "IRQS_ON",
++ "PREEMPT_ON",
++ "TRACE_STOP",
++ "IRQS_OFF",
++ "PREEMPT_OFF",
++ "TRACE_START",
++};
++
++static inline char *getaction(int action)
++{
++ if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
++ return actions[action];
++ return "unknown";
++}
++
++#endif /* _LATENCY_HIST_H */
+diff -Nur linux-3.18.8.orig/init/Kconfig linux-3.18.8/init/Kconfig
+--- linux-3.18.8.orig/init/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/init/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -635,7 +635,7 @@
+
+ config RCU_FAST_NO_HZ
+ bool "Accelerate last non-dyntick-idle CPU's grace periods"
+- depends on NO_HZ_COMMON && SMP
++ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL
+ default n
+ help
+ This option permits CPUs to enter dynticks-idle state even if
+@@ -662,7 +662,7 @@
+ config RCU_BOOST
+ bool "Enable RCU priority boosting"
+ depends on RT_MUTEXES && PREEMPT_RCU
+- default n
++ default y if PREEMPT_RT_FULL
+ help
+ This option boosts the priority of preempted RCU readers that
+ block the current preemptible RCU grace period for too long.
+@@ -1106,6 +1106,7 @@
+ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+@@ -1677,6 +1678,7 @@
+
+ config SLAB
+ bool "SLAB"
++ depends on !PREEMPT_RT_FULL
+ help
+ The regular slab allocator that is established and known to work
+ well in all environments. It organizes cache hot objects in
+@@ -1695,6 +1697,7 @@
+ config SLOB
+ depends on EXPERT
+ bool "SLOB (Simple Allocator)"
++ depends on !PREEMPT_RT_FULL
+ help
+ SLOB replaces the stock allocator with a drastically simpler
+ allocator. SLOB is generally more space efficient but
+diff -Nur linux-3.18.8.orig/init/main.c linux-3.18.8/init/main.c
+--- linux-3.18.8.orig/init/main.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/init/main.c 2015-03-03 08:05:18.000000000 +0100
+@@ -533,6 +533,7 @@
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
++ softirq_early_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+ build_all_zonelists(NULL, NULL);
+diff -Nur linux-3.18.8.orig/init/Makefile linux-3.18.8/init/Makefile
+--- linux-3.18.8.orig/init/Makefile 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/init/Makefile 2015-03-03 08:05:18.000000000 +0100
+@@ -33,4 +33,4 @@
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+diff -Nur linux-3.18.8.orig/ipc/mqueue.c linux-3.18.8/ipc/mqueue.c
+--- linux-3.18.8.orig/ipc/mqueue.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/ipc/mqueue.c 2015-03-03 08:05:18.000000000 +0100
+@@ -923,12 +923,17 @@
+ struct msg_msg *message,
+ struct ext_wait_queue *receiver)
+ {
++ /*
++ * Keep them in one critical section for PREEMPT_RT:
++ */
++ preempt_disable_rt();
+ receiver->msg = message;
+ list_del(&receiver->list);
+ receiver->state = STATE_PENDING;
+ wake_up_process(receiver->task);
+ smp_wmb();
+ receiver->state = STATE_READY;
++ preempt_enable_rt();
+ }
+
+ /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
+@@ -942,13 +947,18 @@
+ wake_up_interruptible(&info->wait_q);
+ return;
+ }
+- if (msg_insert(sender->msg, info))
+- return;
+- list_del(&sender->list);
+- sender->state = STATE_PENDING;
+- wake_up_process(sender->task);
+- smp_wmb();
+- sender->state = STATE_READY;
++ /*
++ * Keep them in one critical section for PREEMPT_RT:
++ */
++ preempt_disable_rt();
++ if (!msg_insert(sender->msg, info)) {
++ list_del(&sender->list);
++ sender->state = STATE_PENDING;
++ wake_up_process(sender->task);
++ smp_wmb();
++ sender->state = STATE_READY;
++ }
++ preempt_enable_rt();
+ }
+
+ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+diff -Nur linux-3.18.8.orig/ipc/msg.c linux-3.18.8/ipc/msg.c
+--- linux-3.18.8.orig/ipc/msg.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/ipc/msg.c 2015-03-03 08:05:18.000000000 +0100
+@@ -188,6 +188,12 @@
+ struct msg_receiver *msr, *t;
+
+ list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
++ /*
++ * Make sure that the wakeup doesn't preempt
++ * this CPU prematurely (on PREEMPT_RT).
++ */
++ preempt_disable_rt();
++
+ msr->r_msg = NULL; /* initialize expunge ordering */
+ wake_up_process(msr->r_tsk);
+ /*
+@@ -198,6 +204,8 @@
+ */
+ smp_mb();
+ msr->r_msg = ERR_PTR(res);
++
++ preempt_enable_rt();
+ }
+ }
+
+@@ -574,6 +582,11 @@
+ if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
+ !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
+ msr->r_msgtype, msr->r_mode)) {
++ /*
++ * Make sure that the wakeup doesn't preempt
++ * this CPU prematurely (on PREEMPT_RT).
++ */
++ preempt_disable_rt();
+
+ list_del(&msr->r_list);
+ if (msr->r_maxsize < msg->m_ts) {
+@@ -595,12 +608,13 @@
+ */
+ smp_mb();
+ msr->r_msg = msg;
++ preempt_enable_rt();
+
+ return 1;
+ }
++ preempt_enable_rt();
+ }
+ }
+-
+ return 0;
+ }
+
+diff -Nur linux-3.18.8.orig/ipc/sem.c linux-3.18.8/ipc/sem.c
+--- linux-3.18.8.orig/ipc/sem.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/ipc/sem.c 2015-03-03 08:05:18.000000000 +0100
+@@ -673,6 +673,13 @@
+ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ struct sem_queue *q, int error)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *p = q->sleeper;
++ get_task_struct(p);
++ q->status = error;
++ wake_up_process(p);
++ put_task_struct(p);
++#else
+ if (list_empty(pt)) {
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+@@ -684,6 +691,7 @@
+ q->pid = error;
+
+ list_add_tail(&q->list, pt);
++#endif
+ }
+
+ /**
+@@ -697,6 +705,7 @@
+ */
+ static void wake_up_sem_queue_do(struct list_head *pt)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ struct sem_queue *q, *t;
+ int did_something;
+
+@@ -709,6 +718,7 @@
+ }
+ if (did_something)
+ preempt_enable();
++#endif
+ }
+
+ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+diff -Nur linux-3.18.8.orig/kernel/cgroup.c linux-3.18.8/kernel/cgroup.c
+--- linux-3.18.8.orig/kernel/cgroup.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/cgroup.c 2015-03-03 08:05:18.000000000 +0100
+@@ -4355,10 +4355,10 @@
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
+
+-static void css_release_work_fn(struct work_struct *work)
++static void css_release_work_fn(struct swork_event *sev)
+ {
+ struct cgroup_subsys_state *css =
+- container_of(work, struct cgroup_subsys_state, destroy_work);
++ container_of(sev, struct cgroup_subsys_state, destroy_swork);
+ struct cgroup_subsys *ss = css->ss;
+ struct cgroup *cgrp = css->cgroup;
+
+@@ -4395,8 +4395,8 @@
+ struct cgroup_subsys_state *css =
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+- INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ INIT_SWORK(&css->destroy_swork, css_release_work_fn);
++ swork_queue(&css->destroy_swork);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -4997,6 +4997,7 @@
+ */
+ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+ BUG_ON(!cgroup_destroy_wq);
++ BUG_ON(swork_get());
+
+ /*
+ * Used to destroy pidlists and separate to serve as flush domain.
+diff -Nur linux-3.18.8.orig/kernel/cpu.c linux-3.18.8/kernel/cpu.c
+--- linux-3.18.8.orig/kernel/cpu.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/cpu.c 2015-03-03 08:05:18.000000000 +0100
+@@ -86,6 +86,290 @@
+ #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
+ #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+
++/**
++ * hotplug_pcp - per cpu hotplug descriptor
++ * @unplug: set when pin_current_cpu() needs to sync tasks
++ * @sync_tsk: the task that waits for tasks to finish pinned sections
++ * @refcount: counter of tasks in pinned sections
++ * @grab_lock: set when the tasks entering pinned sections should wait
++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
++ * @mutex_init: zero if the mutex hasn't been initialized yet.
++ *
++ * Although @unplug and @sync_tsk may point to the same task, the @unplug
++ * is used as a flag and still exists after @sync_tsk has exited and
++ * @sync_tsk has been set to NULL.
++ */
++struct hotplug_pcp {
++ struct task_struct *unplug;
++ struct task_struct *sync_tsk;
++ int refcount;
++ int grab_lock;
++ struct completion synced;
++ struct completion unplug_wait;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * Note, on PREEMPT_RT, the hotplug lock must save the state of
++ * the task, otherwise the mutex will cause the task to fail
++ * to sleep when required. (Because it's called from migrate_disable())
++ *
++ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
++ * state.
++ */
++ spinlock_t lock;
++#else
++ struct mutex mutex;
++#endif
++ int mutex_init;
++};
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
++#else
++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
++#endif
++
++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
++
++/**
++ * pin_current_cpu - Prevent the current cpu from being unplugged
++ *
++ * Lightweight version of get_online_cpus() to prevent cpu from being
++ * unplugged when code runs in a migration disabled region.
++ *
++ * Must be called with preemption disabled (preempt_count = 1)!
++ */
++void pin_current_cpu(void)
++{
++ struct hotplug_pcp *hp;
++ int force = 0;
++
++retry:
++ hp = &__get_cpu_var(hotplug_pcp);
++
++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
++ hp->unplug == current) {
++ hp->refcount++;
++ return;
++ }
++ if (hp->grab_lock) {
++ preempt_enable();
++ hotplug_lock(hp);
++ hotplug_unlock(hp);
++ } else {
++ preempt_enable();
++ /*
++ * Try to push this task off of this CPU.
++ */
++ if (!migrate_me()) {
++ preempt_disable();
++ hp = &__get_cpu_var(hotplug_pcp);
++ if (!hp->grab_lock) {
++ /*
++ * Just let it continue; it's already pinned
++ * or about to sleep.
++ */
++ force = 1;
++ goto retry;
++ }
++ preempt_enable();
++ }
++ }
++ preempt_disable();
++ goto retry;
++}
++
++/**
++ * unpin_current_cpu - Allow unplug of current cpu
++ *
++ * Must be called with preemption or interrupts disabled!
++ */
++void unpin_current_cpu(void)
++{
++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++
++ WARN_ON(hp->refcount <= 0);
++
++ /* This is safe. sync_unplug_thread is pinned to this cpu */
++ if (!--hp->refcount && hp->unplug && hp->unplug != current)
++ wake_up_process(hp->unplug);
++}
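++
++/*
++ * Illustrative pairing (hypothetical caller):
++ *
++ *	preempt_disable();
++ *	pin_current_cpu();
++ *	... per-cpu work that must not race with cpu_down() ...
++ *	unpin_current_cpu();
++ *	preempt_enable();
++ *
++ * The actual callers are migrate_disable()/migrate_enable().
++ */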
++
++static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (hp->refcount) {
++ schedule_preempt_disabled();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++}
++
++static int sync_unplug_thread(void *data)
++{
++ struct hotplug_pcp *hp = data;
++
++ wait_for_completion(&hp->unplug_wait);
++ preempt_disable();
++ hp->unplug = current;
++ wait_for_pinned_cpus(hp);
++
++ /*
++ * This thread will synchronize the cpu_down() with threads
++ * that have pinned the CPU. When the pinned CPU count reaches
++ * zero, we inform the cpu_down code to continue to the next step.
++ */
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ preempt_enable();
++ complete(&hp->synced);
++
++ /*
++ * If all succeeds, the next step will need tasks to wait till
++ * the CPU is offline before continuing. To do this, the grab_lock
++ * is set and tasks going into pin_current_cpu() will block on the
++ * mutex. But we still need to wait for those that are already in
++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
++ * will kick this thread out.
++ */
++ while (!hp->grab_lock && !kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++
++ /* Make sure grab_lock is seen before we see a stale completion */
++ smp_mb();
++
++ /*
++ * Now just before cpu_down() enters stop machine, we need to make
++ * sure all tasks that are in pinned CPU sections are out, and new
++ * tasks will now grab the lock, keeping them from entering pinned
++ * CPU sections.
++ */
++ if (!kthread_should_stop()) {
++ preempt_disable();
++ wait_for_pinned_cpus(hp);
++ preempt_enable();
++ complete(&hp->synced);
++ }
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ set_current_state(TASK_RUNNING);
++
++ /*
++ * Force this thread off this CPU as it's going down and
++ * we don't want any more work on this CPU.
++ */
++ current->flags &= ~PF_NO_SETAFFINITY;
++ do_set_cpus_allowed(current, cpu_present_mask);
++ migrate_me();
++ return 0;
++}
++
++static void __cpu_unplug_sync(struct hotplug_pcp *hp)
++{
++ wake_up_process(hp->sync_tsk);
++ wait_for_completion(&hp->synced);
++}
++
++static void __cpu_unplug_wait(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ complete(&hp->unplug_wait);
++ wait_for_completion(&hp->synced);
++}
++
++/*
++ * Start the sync_unplug_thread on the target cpu and wait for it to
++ * complete.
++ */
++static int cpu_unplug_begin(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++ int err;
++
++ /* Protected by cpu_hotplug.lock */
++ if (!hp->mutex_init) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spin_lock_init(&hp->lock);
++#else
++ mutex_init(&hp->mutex);
++#endif
++ hp->mutex_init = 1;
++ }
++
++ /* Inform the scheduler to migrate tasks off this CPU */
++ tell_sched_cpu_down_begin(cpu);
++
++ init_completion(&hp->synced);
++ init_completion(&hp->unplug_wait);
++
++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
++ if (IS_ERR(hp->sync_tsk)) {
++ err = PTR_ERR(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++ return err;
++ }
++ kthread_bind(hp->sync_tsk, cpu);
++
++ /*
++ * Wait for tasks to get out of the pinned sections,
++ * it's still OK if new tasks enter. Some CPU notifiers will
++ * wait for tasks that are going to enter these sections and
++ * we must not have them block.
++ */
++ wake_up_process(hp->sync_tsk);
++ return 0;
++}
++
++static void cpu_unplug_sync(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ init_completion(&hp->synced);
++ /* The completion needs to be initialized before setting grab_lock */
++ smp_wmb();
++
++ /* Grab the mutex before setting grab_lock */
++ hotplug_lock(hp);
++ hp->grab_lock = 1;
++
++ /*
++ * The CPU notifiers have been completed.
++ * Wait for tasks to get out of pinned CPU sections and have new
++ * tasks block until the CPU is completely down.
++ */
++ __cpu_unplug_sync(hp);
++
++ /* All done with the sync thread */
++ kthread_stop(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++}
++
++static void cpu_unplug_done(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ hp->unplug = NULL;
++ /* Let all tasks know cpu unplug is finished before cleaning up */
++ smp_wmb();
++
++ if (hp->sync_tsk)
++ kthread_stop(hp->sync_tsk);
++
++ if (hp->grab_lock) {
++ hotplug_unlock(hp);
++ /* protected by cpu_hotplug.lock */
++ hp->grab_lock = 0;
++ }
++ tell_sched_cpu_down_done(cpu);
++}
++
+ void get_online_cpus(void)
+ {
+ might_sleep();
+@@ -102,6 +386,7 @@
+ {
+ if (cpu_hotplug.active_writer == current)
+ return true;
++
+ if (!mutex_trylock(&cpu_hotplug.lock))
+ return false;
+ cpuhp_lock_acquire_tryread();
+@@ -349,13 +634,15 @@
+ /* Requires cpu_add_remove_lock to be held */
+ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ {
+- int err, nr_calls = 0;
++ int mycpu, err, nr_calls = 0;
+ void *hcpu = (void *)(long)cpu;
+ unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+ struct take_cpu_down_param tcd_param = {
+ .mod = mod,
+ .hcpu = hcpu,
+ };
++ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_org;
+
+ if (num_online_cpus() == 1)
+ return -EBUSY;
+@@ -363,7 +650,34 @@
+ if (!cpu_online(cpu))
+ return -EINVAL;
+
++ /* Move the downtaker off the unplug cpu */
++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++ return -ENOMEM;
++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
++ free_cpumask_var(cpumask);
++ return -ENOMEM;
++ }
++
++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
++ set_cpus_allowed_ptr(current, cpumask);
++ free_cpumask_var(cpumask);
++ migrate_disable();
++ mycpu = smp_processor_id();
++ if (mycpu == cpu) {
++ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
++ migrate_enable();
++ err = -EBUSY;
++ goto restore_cpus;
++ }
++ migrate_enable();
++
+ cpu_hotplug_begin();
++ err = cpu_unplug_begin(cpu);
++ if (err) {
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
++ goto out_cancel;
++ }
+
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+@@ -389,8 +703,12 @@
+ #endif
+ synchronize_rcu();
+
++ __cpu_unplug_wait(cpu);
+ smpboot_park_threads(cpu);
+
++ /* Notifiers are done. Don't let any more tasks pin this CPU. */
++ cpu_unplug_sync(cpu);
++
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+@@ -423,9 +741,14 @@
+ check_for_tasks(cpu);
+
+ out_release:
++ cpu_unplug_done(cpu);
++out_cancel:
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
++restore_cpus:
++ set_cpus_allowed_ptr(current, cpumask_org);
++ free_cpumask_var(cpumask_org);
+ return err;
+ }
+
+diff -Nur linux-3.18.8.orig/kernel/debug/kdb/kdb_io.c linux-3.18.8/kernel/debug/kdb/kdb_io.c
+--- linux-3.18.8.orig/kernel/debug/kdb/kdb_io.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/debug/kdb/kdb_io.c 2015-03-03 08:05:18.000000000 +0100
+@@ -554,7 +554,6 @@
+ int linecount;
+ int colcount;
+ int logging, saved_loglevel = 0;
+- int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+@@ -565,8 +564,6 @@
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+- saved_trap_printk = kdb_trap_printk;
+- kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+@@ -833,7 +830,6 @@
+ } else {
+ __release(kdb_printf_lock);
+ }
+- kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+ }
+@@ -843,9 +839,11 @@
+ va_list ap;
+ int r;
+
++ kdb_trap_printk++;
+ va_start(ap, fmt);
+ r = vkdb_printf(fmt, ap);
+ va_end(ap);
++ kdb_trap_printk--;
+
+ return r;
+ }
+diff -Nur linux-3.18.8.orig/kernel/events/core.c linux-3.18.8/kernel/events/core.c
+--- linux-3.18.8.orig/kernel/events/core.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/events/core.c 2015-03-03 08:05:18.000000000 +0100
+@@ -6336,6 +6336,7 @@
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
++ hwc->hrtimer.irqsafe = 1;
+
+ /*
+ * Since hrtimers have a fixed rate, we can do a static freq->period
+diff -Nur linux-3.18.8.orig/kernel/exit.c linux-3.18.8/kernel/exit.c
+--- linux-3.18.8.orig/kernel/exit.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/exit.c 2015-03-03 08:05:18.000000000 +0100
+@@ -147,7 +147,7 @@
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+
+diff -Nur linux-3.18.8.orig/kernel/fork.c linux-3.18.8/kernel/fork.c
+--- linux-3.18.8.orig/kernel/fork.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/fork.c 2015-03-03 08:05:18.000000000 +0100
+@@ -97,7 +97,7 @@
+
+ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
+
+-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
++DEFINE_RWLOCK(tasklist_lock); /* outer */
+
+ #ifdef CONFIG_PROVE_RCU
+ int lockdep_tasklist_lock_is_held(void)
+@@ -233,7 +233,9 @@
+ if (atomic_dec_and_test(&sig->sigcnt))
+ free_signal_struct(sig);
+ }
+-
++#ifdef CONFIG_PREEMPT_RT_BASE
++static
++#endif
+ void __put_task_struct(struct task_struct *tsk)
+ {
+ WARN_ON(!tsk->exit_state);
+@@ -249,7 +251,18 @@
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
++
++ __put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
+
+ void __init __weak arch_task_cache_init(void) { }
+
+@@ -643,6 +656,19 @@
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++ __mmdrop(mm);
++}
++#endif
++
+ /*
+ * Decrement the use count and release all resources for an mm.
+ */
+@@ -1157,6 +1183,9 @@
+ */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ tsk->posix_timer_list = NULL;
++#endif
+ tsk->cputime_expires.prof_exp = 0;
+ tsk->cputime_expires.virt_exp = 0;
+ tsk->cputime_expires.sched_exp = 0;
+@@ -1284,6 +1313,7 @@
+ spin_lock_init(&p->alloc_lock);
+
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
+
+ p->utime = p->stime = p->gtime = 0;
+ p->utimescaled = p->stimescaled = 0;
+@@ -1291,7 +1321,8 @@
+ p->prev_cputime.utime = p->prev_cputime.stime = 0;
+ #endif
+ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+- seqlock_init(&p->vtime_seqlock);
++ raw_spin_lock_init(&p->vtime_lock);
++ seqcount_init(&p->vtime_seq);
+ p->vtime_snap = 0;
+ p->vtime_snap_whence = VTIME_SLEEPING;
+ #endif
+@@ -1342,6 +1373,9 @@
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ p->pagefault_disabled = 0;
++#endif
+ #ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
+diff -Nur linux-3.18.8.orig/kernel/futex.c linux-3.18.8/kernel/futex.c
+--- linux-3.18.8.orig/kernel/futex.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/futex.c 2015-03-03 08:05:18.000000000 +0100
+@@ -738,7 +738,9 @@
+ * task still owns the PI-state:
+ */
+ if (head->next != next) {
++ raw_spin_unlock_irq(&curr->pi_lock);
+ spin_unlock(&hb->lock);
++ raw_spin_lock_irq(&curr->pi_lock);
+ continue;
+ }
+
+@@ -1705,6 +1707,16 @@
+ requeue_pi_wake_futex(this, &key2, hb2);
+ drop_count++;
+ continue;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Waiter was woken by timeout or
++ * signal and has set pi_blocked_on to
++ * PI_WAKEUP_INPROGRESS before we
++ * tried to enqueue it on the rtmutex.
++ */
++ this->pi_state = NULL;
++ free_pi_state(pi_state);
++ continue;
+ } else if (ret) {
+ /* -EDEADLK */
+ this->pi_state = NULL;
+@@ -2549,7 +2561,7 @@
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct rt_mutex *pi_mutex = NULL;
+- struct futex_hash_bucket *hb;
++ struct futex_hash_bucket *hb, *hb2;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -2574,10 +2586,7 @@
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+- debug_rt_mutex_init_waiter(&rt_waiter);
+- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
+- RB_CLEAR_NODE(&rt_waiter.tree_entry);
+- rt_waiter.task = NULL;
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ if (unlikely(ret != 0))
+@@ -2608,20 +2617,55 @@
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
+- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
+- if (ret)
+- goto out_put_keys;
++ /*
++ * On RT we must avoid races with requeue and trying to block
++ * on two mutexes (hb->lock and uaddr2's rtmutex) by
++ * serializing access to pi_blocked_on with pi_lock.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ if (current->pi_blocked_on) {
++ /*
++ * We have been requeued or are in the process of
++ * being requeued.
++ */
++ raw_spin_unlock_irq(&current->pi_lock);
++ } else {
++ /*
++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++ * prevents a concurrent requeue from moving us to the
++ * uaddr2 rtmutex. After that we can safely acquire
++ * (and possibly block on) hb->lock.
++ */
++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ spin_lock(&hb->lock);
++
++ /*
++ * Clean up pi_blocked_on. We might leak it otherwise
++ * when we succeeded with the hb->lock in the fast
++ * path.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++ spin_unlock(&hb->lock);
++ if (ret)
++ goto out_put_keys;
++ }
+
+ /*
+- * In order for us to be here, we know our q.key == key2, and since
+- * we took the hb->lock above, we also know that futex_requeue() has
+- * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquisition by the requeue code. The
+- * futex_requeue dropped our key1 reference and incremented our key2
+- * reference count.
++ * In order to be here, we have either been requeued, are in
++ * the process of being requeued, or requeue successfully
++ * acquired uaddr2 on our behalf. If pi_blocked_on was
++ * non-null above, we may be racing with a requeue. Do not
++ * rely on q->lock_ptr to be hb2->lock until after blocking on
++ * hb->lock or hb2->lock. The futex_requeue dropped our key1
++ * reference and incremented our key2 reference count.
+ */
++ hb2 = hash_futex(&key2);
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+@@ -2630,9 +2674,10 @@
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- spin_unlock(q.lock_ptr);
++ spin_unlock(&hb2->lock);
+ }
+ } else {
+ /*
+@@ -2645,7 +2690,8 @@
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+diff -Nur linux-3.18.8.orig/kernel/irq/handle.c linux-3.18.8/kernel/irq/handle.c
+--- linux-3.18.8.orig/kernel/irq/handle.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/irq/handle.c 2015-03-03 08:05:18.000000000 +0100
+@@ -133,6 +133,8 @@
+ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+@@ -173,7 +175,11 @@
+ action = action->next;
+ } while (action);
+
+- add_interrupt_randomness(irq, flags);
++#ifndef CONFIG_PREEMPT_RT_FULL
++ add_interrupt_randomness(irq, flags, ip);
++#else
++ desc->random_ip = ip;
++#endif
+
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+diff -Nur linux-3.18.8.orig/kernel/irq/manage.c linux-3.18.8/kernel/irq/manage.c
+--- linux-3.18.8.orig/kernel/irq/manage.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/irq/manage.c 2015-03-03 08:05:18.000000000 +0100
+@@ -22,6 +22,7 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ __read_mostly bool force_irqthreads;
+
+ static int __init setup_forced_irqthreads(char *arg)
+@@ -30,6 +31,7 @@
+ return 0;
+ }
+ early_param("threadirqs", setup_forced_irqthreads);
++# endif
+ #endif
+
+ static void __synchronize_hardirq(struct irq_desc *desc)
+@@ -173,6 +175,62 @@
+ irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void _irq_affinity_notify(struct irq_affinity_notify *notify);
++static struct task_struct *set_affinity_helper;
++static LIST_HEAD(affinity_list);
++static DEFINE_RAW_SPINLOCK(affinity_list_lock);
++
++static int set_affinity_thread(void *unused)
++{
++ while (1) {
++ struct irq_affinity_notify *notify;
++ int empty;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ raw_spin_lock_irq(&affinity_list_lock);
++ empty = list_empty(&affinity_list);
++ raw_spin_unlock_irq(&affinity_list_lock);
++
++ if (empty)
++ schedule();
++ if (kthread_should_stop())
++ break;
++ set_current_state(TASK_RUNNING);
++try_next:
++ notify = NULL;
++
++ raw_spin_lock_irq(&affinity_list_lock);
++ if (!list_empty(&affinity_list)) {
++ notify = list_first_entry(&affinity_list,
++ struct irq_affinity_notify, list);
++ list_del_init(&notify->list);
++ }
++ raw_spin_unlock_irq(&affinity_list_lock);
++
++ if (!notify)
++ continue;
++ _irq_affinity_notify(notify);
++ goto try_next;
++ }
++ return 0;
++}
++
++static void init_helper_thread(void)
++{
++ if (set_affinity_helper)
++ return;
++ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
++ "affinity-cb");
++ WARN_ON(IS_ERR(set_affinity_helper));
++}
++#else
++
++static inline void init_helper_thread(void) { }
++
++#endif
++
+ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+ {
+@@ -211,7 +269,17 @@
+
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_lock(&affinity_list_lock);
++ if (list_empty(&desc->affinity_notify->list))
++ list_add_tail(&affinity_list,
++ &desc->affinity_notify->list);
++ raw_spin_unlock(&affinity_list_lock);
++ wake_up_process(set_affinity_helper);
++#else
+ schedule_work(&desc->affinity_notify->work);
++#endif
+ }
+ irqd_set(data, IRQD_AFFINITY_SET);
+
+@@ -246,10 +314,8 @@
+ }
+ EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+-static void irq_affinity_notify(struct work_struct *work)
++static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+ {
+- struct irq_affinity_notify *notify =
+- container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+@@ -271,6 +337,13 @@
+ kref_put(&notify->kref, notify->release);
+ }
+
++static void irq_affinity_notify(struct work_struct *work)
++{
++ struct irq_affinity_notify *notify =
++ container_of(work, struct irq_affinity_notify, work);
++ _irq_affinity_notify(notify);
++}
++
+ /**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+@@ -300,6 +373,8 @@
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
++ INIT_LIST_HEAD(&notify->list);
++ init_helper_thread();
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+@@ -788,7 +863,15 @@
+ local_bh_disable();
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action);
+- local_bh_enable();
++ /*
++ * Interrupts which have real time requirements can be set up
++ * to avoid softirq processing in the thread handler. This is
++ * safe as these interrupts do not raise soft interrupts.
++ */
++ if (irq_settings_no_softirq_call(desc))
++ _local_bh_enable();
++ else
++ local_bh_enable();
+ return ret;
+ }
+
+@@ -871,6 +954,12 @@
+ if (action_ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_disable();
++ add_interrupt_randomness(action->irq, 0,
++ desc->random_ip ^ (unsigned long) action);
++ migrate_enable();
++#endif
+ wake_threads_waitq(desc);
+ }
+
+@@ -1184,6 +1273,9 @@
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
+
++ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
++ irq_settings_set_no_softirq_call(desc);
++
+ /* Set default affinity mask once everything is setup */
+ setup_affinity(irq, desc, mask);
+
+diff -Nur linux-3.18.8.orig/kernel/irq/settings.h linux-3.18.8/kernel/irq/settings.h
+--- linux-3.18.8.orig/kernel/irq/settings.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/irq/settings.h 2015-03-03 08:05:18.000000000 +0100
+@@ -15,6 +15,7 @@
+ _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
+ _IRQ_IS_POLLED = IRQ_IS_POLLED,
++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+ };
+
+@@ -28,6 +29,7 @@
+ #define IRQ_NESTED_THREAD GOT_YOU_MORON
+ #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
+ #define IRQ_IS_POLLED GOT_YOU_MORON
++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
+ #undef IRQF_MODIFY_MASK
+ #define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+@@ -38,6 +40,16 @@
+ desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
+ }
+
++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
++{
++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
++}
++
++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
++{
++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
++}
++
+ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+ {
+ return desc->status_use_accessors & _IRQ_PER_CPU;
+diff -Nur linux-3.18.8.orig/kernel/irq/spurious.c linux-3.18.8/kernel/irq/spurious.c
+--- linux-3.18.8.orig/kernel/irq/spurious.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/irq/spurious.c 2015-03-03 08:05:18.000000000 +0100
+@@ -444,6 +444,10 @@
+
+ static int __init irqfixup_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
++ return 1;
++#endif
+ irqfixup = 1;
+ printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ printk(KERN_WARNING "This may impact system performance.\n");
+@@ -456,6 +460,10 @@
+
+ static int __init irqpoll_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
++ return 1;
++#endif
+ irqfixup = 2;
+ printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ "enabled\n");
+diff -Nur linux-3.18.8.orig/kernel/irq_work.c linux-3.18.8/kernel/irq_work.c
+--- linux-3.18.8.orig/kernel/irq_work.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/irq_work.c 2015-03-03 08:05:18.000000000 +0100
+@@ -22,7 +22,9 @@
+
+ static DEFINE_PER_CPU(struct llist_head, raised_list);
+ static DEFINE_PER_CPU(struct llist_head, lazy_list);
+-
++#ifdef CONFIG_PREEMPT_RT_FULL
++static DEFINE_PER_CPU(struct llist_head, hirq_work_list);
++#endif
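++/*
++ * On PREEMPT_RT, items flagged IRQ_WORK_HARD_IRQ are kept on the separate
++ * hirq_work_list and run from real hardirq context (see irq_work_run()
++ * below); all other items keep using the raised and lazy lists.
++ */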
+ /*
+ * Claim the entry so that no one else will poke at it.
+ */
+@@ -49,7 +51,11 @@
+ return true;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++void arch_irq_work_raise(void)
++#else
+ void __weak arch_irq_work_raise(void)
++#endif
+ {
+ /*
+ * Lame architectures will get the timer tick callback
+@@ -93,8 +99,21 @@
+ /* Queue the entry and raise the IPI if needed. */
+ preempt_disable();
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (work->flags & IRQ_WORK_HARD_IRQ) {
++ if (llist_add(&work->llnode, this_cpu_ptr(&hirq_work_list))) {
++ if (work->flags & IRQ_WORK_LAZY) {
++ if (tick_nohz_tick_stopped())
++ arch_irq_work_raise();
++ } else {
++ arch_irq_work_raise();
++ }
++ }
+ /* If the work is "lazy", handle it from next tick if any */
++ } else if (work->flags & IRQ_WORK_LAZY) {
++#else
+ if (work->flags & IRQ_WORK_LAZY) {
++#endif
+ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+ tick_nohz_tick_stopped())
+ arch_irq_work_raise();
+@@ -116,7 +135,7 @@
+ raised = this_cpu_ptr(&raised_list);
+ lazy = this_cpu_ptr(&lazy_list);
+
+- if (llist_empty(raised) || arch_irq_work_has_interrupt())
++ if (llist_empty(raised))
+ if (llist_empty(lazy))
+ return false;
+
+@@ -132,7 +151,9 @@
+ struct irq_work *work;
+ struct llist_node *llnode;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUG_ON(!irqs_disabled());
++#endif
+
+ if (llist_empty(list))
+ return;
+@@ -168,6 +189,12 @@
+ */
+ void irq_work_run(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (in_irq()) {
++ irq_work_run_list(this_cpu_ptr(&hirq_work_list));
++ return;
++ }
++#endif
+ irq_work_run_list(this_cpu_ptr(&raised_list));
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
+ }
+@@ -175,9 +202,16 @@
+
+ void irq_work_tick(void)
+ {
+- struct llist_head *raised = &__get_cpu_var(raised_list);
++ struct llist_head *raised;
+
+- if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (in_irq()) {
++ irq_work_run_list(this_cpu_ptr(&hirq_work_list));
++ return;
++ }
++#endif
++ raised = &__get_cpu_var(raised_list);
++ if (!llist_empty(raised))
+ irq_work_run_list(raised);
+ irq_work_run_list(&__get_cpu_var(lazy_list));
+ }
+diff -Nur linux-3.18.8.orig/kernel/Kconfig.locks linux-3.18.8/kernel/Kconfig.locks
+--- linux-3.18.8.orig/kernel/Kconfig.locks 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/Kconfig.locks 2015-03-03 08:05:18.000000000 +0100
+@@ -225,11 +225,11 @@
+
+ config MUTEX_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config RWSEM_SPIN_ON_OWNER
+ def_bool y
+- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+
+ config ARCH_USE_QUEUE_RWLOCK
+ bool
+diff -Nur linux-3.18.8.orig/kernel/Kconfig.preempt linux-3.18.8/kernel/Kconfig.preempt
+--- linux-3.18.8.orig/kernel/Kconfig.preempt 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/Kconfig.preempt 2015-03-03 08:05:18.000000000 +0100
+@@ -1,3 +1,16 @@
++config PREEMPT
++ bool
++ select PREEMPT_COUNT
++
++config PREEMPT_RT_BASE
++ bool
++ select PREEMPT
++
++config HAVE_PREEMPT_LAZY
++ bool
++
++config PREEMPT_LAZY
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
+
+ choice
+ prompt "Preemption Model"
+@@ -33,9 +46,9 @@
+
+ Select this if you are building a kernel for a desktop system.
+
+-config PREEMPT
++config PREEMPT__LL
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+- select PREEMPT_COUNT
++ select PREEMPT
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+ help
+ This option reduces the latency of the kernel by making
+@@ -52,6 +65,22 @@
+ embedded system with latency requirements in the milliseconds
+ range.
+
++config PREEMPT_RTB
++ bool "Preemptible Kernel (Basic RT)"
++ select PREEMPT_RT_BASE
++ help
++ This option is basically the same as the Low-Latency Desktop model, but
++ enables changes which are preliminary for the full preemptible
++ RT kernel.
++
++config PREEMPT_RT_FULL
++ bool "Fully Preemptible Kernel (RT)"
++ depends on IRQ_FORCED_THREADING
++ select PREEMPT_RT_BASE
++ select PREEMPT_RCU
++ help
++ Enables all PREEMPT_RT features for a fully preemptible kernel.
++
+ endchoice
+
+ config PREEMPT_COUNT
+diff -Nur linux-3.18.8.orig/kernel/ksysfs.c linux-3.18.8/kernel/ksysfs.c
+--- linux-3.18.8.orig/kernel/ksysfs.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/ksysfs.c 2015-03-03 08:05:18.000000000 +0100
+@@ -136,6 +136,15 @@
+
+ #endif /* CONFIG_KEXEC */
+
++#if defined(CONFIG_PREEMPT_RT_FULL)
++static ssize_t realtime_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
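++/* Exposed read-only as /sys/kernel/realtime; always reads "1". */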
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -203,6 +212,9 @@
+ &vmcoreinfo_attr.attr,
+ #endif
+ &rcu_expedited_attr.attr,
++#ifdef CONFIG_PREEMPT_RT_FULL
++ &realtime_attr.attr,
++#endif
+ NULL
+ };
+
+diff -Nur linux-3.18.8.orig/kernel/locking/lglock.c linux-3.18.8/kernel/locking/lglock.c
+--- linux-3.18.8.orig/kernel/locking/lglock.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/lglock.c 2015-03-03 08:05:18.000000000 +0100
+@@ -4,6 +4,15 @@
+ #include <linux/cpu.h>
+ #include <linux/string.h>
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define lg_lock_ptr arch_spinlock_t
++# define lg_do_lock(l) arch_spin_lock(l)
++# define lg_do_unlock(l) arch_spin_unlock(l)
++#else
++# define lg_lock_ptr struct rt_mutex
++# define lg_do_lock(l) __rt_spin_lock(l)
++# define lg_do_unlock(l) __rt_spin_unlock(l)
++#endif
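++/*
++ * Under PREEMPT_RT the per-CPU locks become sleeping rt_mutexes, so the
++ * helpers above map onto the rtmutex-based spinlock primitives instead
++ * of raw arch spinlocks.
++ */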
+ /*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there)
+@@ -12,51 +21,60 @@
+
+ void lg_lock_init(struct lglock *lg, char *name)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int i;
++
++ for_each_possible_cpu(i) {
++ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
++
++ rt_mutex_init(lock);
++ }
++#endif
+ LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+ }
+ EXPORT_SYMBOL(lg_lock_init);
+
+ void lg_local_lock(struct lglock *lg)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+- preempt_disable();
++ migrate_disable();
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ EXPORT_SYMBOL(lg_local_lock);
+
+ void lg_local_unlock(struct lglock *lg)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+- arch_spin_unlock(lock);
+- preempt_enable();
++ lg_do_unlock(lock);
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(lg_local_unlock);
+
+ void lg_local_lock_cpu(struct lglock *lg, int cpu)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+- preempt_disable();
++ preempt_disable_nort();
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ EXPORT_SYMBOL(lg_local_lock_cpu);
+
+ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+- arch_spin_unlock(lock);
+- preempt_enable();
++ lg_do_unlock(lock);
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+@@ -64,12 +82,12 @@
+ {
+ int i;
+
+- preempt_disable();
++ preempt_disable_nort();
+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ for_each_possible_cpu(i) {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ }
+ EXPORT_SYMBOL(lg_global_lock);
+@@ -80,10 +98,35 @@
+
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ for_each_possible_cpu(i) {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+- arch_spin_unlock(lock);
++ lg_do_unlock(lock);
+ }
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(lg_global_unlock);
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * HACK: If you use this, you get to keep the pieces.
++ * Used in queue_stop_cpus_work() when stop machinery
++ * is called from inactive CPU, so we can't schedule.
++ */
++# define lg_do_trylock_relax(l) \
++ do { \
++ while (!__rt_spin_trylock(l)) \
++ cpu_relax(); \
++ } while (0)
++
++void lg_global_trylock_relax(struct lglock *lg)
++{
++ int i;
++
++ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
++ for_each_possible_cpu(i) {
++ lg_lock_ptr *lock;
++ lock = per_cpu_ptr(lg->lock, i);
++ lg_do_trylock_relax(lock);
++ }
++}
++#endif
+diff -Nur linux-3.18.8.orig/kernel/locking/lockdep.c linux-3.18.8/kernel/locking/lockdep.c
+--- linux-3.18.8.orig/kernel/locking/lockdep.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/lockdep.c 2015-03-03 08:05:18.000000000 +0100
+@@ -3542,6 +3542,7 @@
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
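++/*
++ * On RT, softirqs are processed in preemptible task context, so the
++ * softirqs_enabled invariant checked below does not hold there.
++ */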
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3556,6 +3557,7 @@
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
+ }
++#endif
+
+ if (!debug_locks)
+ print_irqtrace_events(current);
+diff -Nur linux-3.18.8.orig/kernel/locking/Makefile linux-3.18.8/kernel/locking/Makefile
+--- linux-3.18.8.orig/kernel/locking/Makefile 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/Makefile 2015-03-03 08:05:18.000000000 +0100
+@@ -1,5 +1,5 @@
+
+-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
++obj-y += semaphore.o mcs_spinlock.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+ CFLAGS_REMOVE_lockdep.o = -pg
+@@ -8,7 +8,11 @@
+ CFLAGS_REMOVE_rtmutex-debug.o = -pg
+ endif
+
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
++obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++obj-y += rwsem.o
++endif
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -21,8 +25,11 @@
+ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
++endif
+ obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
+ obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+diff -Nur linux-3.18.8.orig/kernel/locking/percpu-rwsem.c linux-3.18.8/kernel/locking/percpu-rwsem.c
+--- linux-3.18.8.orig/kernel/locking/percpu-rwsem.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/percpu-rwsem.c 2015-03-03 08:05:18.000000000 +0100
+@@ -84,8 +84,12 @@
+
+ down_read(&brw->rw_sem);
+ atomic_inc(&brw->slow_read_ctr);
++#ifdef CONFIG_PREEMPT_RT_FULL
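++ /* The RT rwsem implementation has no __up_read(), use up_read() */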
++ up_read(&brw->rw_sem);
++#else
+ /* avoid up_read()->rwsem_release() */
+ __up_read(&brw->rw_sem);
++#endif
+ }
+
+ void percpu_up_read(struct percpu_rw_semaphore *brw)
+diff -Nur linux-3.18.8.orig/kernel/locking/rt.c linux-3.18.8/kernel/locking/rt.c
+--- linux-3.18.8.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/kernel/locking/rt.c 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,437 @@
++/*
++ * kernel/rt.c
++ *
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ * Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
++ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
++ * - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ * (also by Steven Rostedt)
++ * - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ * Doing priority inheritance with the help of the scheduler.
++ *
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ * - major rework based on Esben Nielsen's initial patch
++ * - replaced thread_info references by task_struct refs
++ * - removed task->pending_owner dependency
++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ * in the scheduler return path as discussed with Steven Rostedt
++ *
++ * Copyright (C) 2006, Kihon Technologies Inc.
++ * Steven Rostedt <rostedt@goodmis.org>
++ * - debugged and patched Thomas Gleixner's rework.
++ * - added back the cmpxchg to the rework.
++ * - turned atomic require back on for SMP.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/rtmutex.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/syscalls.h>
++#include <linux/interrupt.h>
++#include <linux/plist.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/hrtimer.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * struct mutex functions
++ */
++void __mutex_do_init(struct mutex *mutex, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
++ lockdep_init_map(&mutex->dep_map, name, key, 0);
++#endif
++ mutex->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__mutex_do_init);
++
++void __lockfunc _mutex_lock(struct mutex *lock)
++{
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock);
++
++int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible);
++
++int __lockfunc _mutex_lock_killable(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
++{
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nested);
++
++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
++{
++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nest_lock);
++
++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
++
++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable_nested);
++#endif
++
++int __lockfunc _mutex_trylock(struct mutex *lock)
++{
++ int ret = rt_mutex_trylock(&lock->lock);
++
++ if (ret)
++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_trylock);
++
++void __lockfunc _mutex_unlock(struct mutex *lock)
++{
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_unlock);
++
++/*
++ * rwlock_t functions
++ */
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = rt_mutex_trylock(&rwlock->lock);
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ ret = rt_write_trylock(rwlock);
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock_irqsave);
++
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the lock,
++ * but not when read_depth == 0, which means that the lock is
++ * write locked.
++ */
++ if (rt_mutex_owner(lock) != current) {
++ migrate_disable();
++ ret = rt_mutex_trylock(lock);
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ } else if (!rwlock->read_depth) {
++ ret = 0;
++ }
++
++ if (ret)
++ rwlock->read_depth++;
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ migrate_disable();
++ __rt_spin_lock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++
++
++ /*
++ * recursive read locks succeed when current owns the lock
++ */
++ if (rt_mutex_owner(lock) != current) {
++ migrate_disable();
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __rt_spin_lock(lock);
++ }
++ rwlock->read_depth++;
++}
++
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++ migrate_enable();
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++ /* Release the lock only when read_depth is down to 0 */
++ if (--rwlock->read_depth == 0) {
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++ migrate_enable();
++ }
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_write_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_write_lock_irqsave);
++
++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_read_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_read_lock_irqsave);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++ lockdep_init_map(&rwlock->dep_map, name, key, 0);
++#endif
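++ /*
++ * rwlocks replace spinning locks on RT, so the slowpath must save
++ * and restore the task state around the sleep (save_state = 1).
++ */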
++ rwlock->lock.save_state = 1;
++ rwlock->read_depth = 0;
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++/*
++ * rw_semaphores
++ */
++
++void rt_up_write(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_write);
++
++void rt_up_read(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_read);
++
++/*
++ * downgrade a write lock into a read lock
++ * - just wake up any readers at the front of the queue
++ */
++void rt_downgrade_write(struct rw_semaphore *rwsem)
++{
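++ /*
++ * On RT an rwsem is a plain rt_mutex and a reader holds it
++ * exclusively, so there is nothing to hand over to other readers.
++ */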
++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
++}
++EXPORT_SYMBOL(rt_downgrade_write);
++
++int rt_down_write_trylock(struct rw_semaphore *rwsem)
++{
++ int ret = rt_mutex_trylock(&rwsem->lock);
++
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_trylock);
++
++void rt_down_write(struct rw_semaphore *rwsem)
++{
++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write);
++
++void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested);
++
++void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
++ struct lockdep_map *nest)
++{
++ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested_lock);
++
++int rt_down_read_trylock(struct rw_semaphore *rwsem)
++{
++ int ret;
++
++ ret = rt_mutex_trylock(&rwsem->lock);
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_read_trylock);
++
++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++
++void rt_down_read(struct rw_semaphore *rwsem)
++{
++ __rt_down_read(rwsem, 0);
++}
++EXPORT_SYMBOL(rt_down_read);
++
++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ __rt_down_read(rwsem, subclass);
++}
++EXPORT_SYMBOL(rt_down_read_nested);
++
++void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
++ lockdep_init_map(&rwsem->dep_map, name, key, 0);
++#endif
++ rwsem->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__rt_rwsem_init);
++
++/**
++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
++ * @cnt: the atomic which we are to dec
++ * @lock: the mutex to return holding if we dec to 0
++ *
++ * return true and hold lock if we dec to 0, return false otherwise
++ */
++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
++{
++ /* dec if we can't possibly hit 0 */
++ if (atomic_add_unless(cnt, -1, 1))
++ return 0;
++ /* we might hit 0, so take the lock */
++ mutex_lock(lock);
++ if (!atomic_dec_and_test(cnt)) {
++ /* when we actually did the dec, we didn't hit 0 */
++ mutex_unlock(lock);
++ return 0;
++ }
++ /* we hit 0, and we hold the lock */
++ return 1;
++}
++EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+diff -Nur linux-3.18.8.orig/kernel/locking/rtmutex.c linux-3.18.8/kernel/locking/rtmutex.c
+--- linux-3.18.8.orig/kernel/locking/rtmutex.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/rtmutex.c 2015-03-03 08:05:18.000000000 +0100
+@@ -7,6 +7,11 @@
+ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
++ * Adaptive Spinlocks:
++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
++ * and Peter Morreale,
++ * Adaptive Spinlocks simplification:
++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
+ *
+ * See Documentation/locking/rt-mutex-design.txt for details.
+ */
+@@ -16,6 +21,7 @@
+ #include <linux/sched/rt.h>
+ #include <linux/sched/deadline.h>
+ #include <linux/timer.h>
++#include <linux/ww_mutex.h>
+
+ #include "rtmutex_common.h"
+
+@@ -69,6 +75,12 @@
+ clear_rt_mutex_waiters(lock);
+ }
+
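++/*
++ * During futex requeue, pi_blocked_on may carry the special
++ * PI_WAKEUP_INPROGRESS / PI_REQUEUE_INPROGRESS markers rather than
++ * a real waiter; treat those as "not blocked".
++ */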
++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
++{
++ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
++ waiter != PI_REQUEUE_INPROGRESS;
++}
++
+ /*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+@@ -333,6 +345,14 @@
+ return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+ }
+
++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
++{
++ if (waiter->savestate)
++ wake_up_lock_sleeper(waiter->task);
++ else
++ wake_up_process(waiter->task);
++}
++
+ /*
+ * Max number of times we'll walk the boosting chain:
+ */
+@@ -340,7 +360,8 @@
+
+ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+ {
+- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
++ return rt_mutex_real_waiter(p->pi_blocked_on) ?
++ p->pi_blocked_on->lock : NULL;
+ }
+
+ /*
+@@ -477,7 +498,7 @@
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+- if (!waiter)
++ if (!rt_mutex_real_waiter(waiter))
+ goto out_unlock_pi;
+
+ /*
+@@ -639,13 +660,16 @@
+ * follow here. This is the end of the chain we are walking.
+ */
+ if (!rt_mutex_owner(lock)) {
++ struct rt_mutex_waiter *lock_top_waiter;
++
+ /*
+ * If the requeue [7] above changed the top waiter,
+ * then we need to wake the new top waiter up to try
+ * to get the lock.
+ */
+- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+- wake_up_process(rt_mutex_top_waiter(lock)->task);
++ lock_top_waiter = rt_mutex_top_waiter(lock);
++ if (prerequeue_top_waiter != lock_top_waiter)
++ rt_mutex_wake_waiter(lock_top_waiter);
+ raw_spin_unlock(&lock->wait_lock);
+ return 0;
+ }
+@@ -738,6 +762,25 @@
+ return ret;
+ }
+
++
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++/*
++ * Note that RT tasks are excluded from lateral-steals to prevent the
++ * introduction of an unbounded latency
++ */
++static inline int lock_is_stealable(struct task_struct *task,
++ struct task_struct *pendowner, int mode)
++{
++ if (mode == STEAL_NORMAL || rt_task(task)) {
++ if (task->prio >= pendowner->prio)
++ return 0;
++ } else if (task->prio > pendowner->prio)
++ return 0;
++ return 1;
++}
++
+ /*
+ * Try to take an rt-mutex
+ *
+@@ -748,8 +791,9 @@
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ * callsite called task_blocked_on_lock(), otherwise NULL
+ */
+-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+- struct rt_mutex_waiter *waiter)
++static int __try_to_take_rt_mutex(struct rt_mutex *lock,
++ struct task_struct *task,
++ struct rt_mutex_waiter *waiter, int mode)
+ {
+ unsigned long flags;
+
+@@ -788,8 +832,10 @@
+ * If waiter is not the highest priority waiter of
+ * @lock, give up.
+ */
+- if (waiter != rt_mutex_top_waiter(lock))
++ if (waiter != rt_mutex_top_waiter(lock)) {
++ /* XXX lock_is_stealable() ? */
+ return 0;
++ }
+
+ /*
+ * We can acquire the lock. Remove the waiter from the
+@@ -807,14 +853,10 @@
+ * not need to be dequeued.
+ */
+ if (rt_mutex_has_waiters(lock)) {
+- /*
+- * If @task->prio is greater than or equal to
+- * the top waiter priority (kernel view),
+- * @task lost.
+- */
+- if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+- return 0;
++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
+
++ if (task != pown && !lock_is_stealable(task, pown, mode))
++ return 0;
+ /*
+ * The current top waiter stays enqueued. We
+ * don't have to change anything in the lock
+@@ -863,6 +905,369 @@
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * preemptible spin_lock functions:
++ */
++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ might_sleep();
++
++ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
++ rt_mutex_deadlock_account_lock(lock, current);
++ else
++ slowfn(lock);
++}
++
++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
++ rt_mutex_deadlock_account_unlock(current);
++ else
++ slowfn(lock);
++}
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
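++/*
++ * Spin while the lock owner is running on another CPU; return 1
++ * (caller goes to sleep) once the owner is scheduled out, 0 when
++ * ownership changed and the acquisition should be retried.
++ */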
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *owner)
++{
++ int res = 0;
++
++ rcu_read_lock();
++ for (;;) {
++ if (owner != rt_mutex_owner(lock))
++ break;
++ /*
++ * Ensure that owner->on_cpu is dereferenced _after_
++ * checking the above to be valid.
++ */
++ barrier();
++ if (!owner->on_cpu) {
++ res = 1;
++ break;
++ }
++ cpu_relax();
++ }
++ rcu_read_unlock();
++ return res;
++}
++#else
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *orig_owner)
++{
++ return 1;
++}
++#endif
++
++# define pi_lock(lock) raw_spin_lock_irq(lock)
++# define pi_unlock(lock) raw_spin_unlock_irq(lock)
++
++static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task,
++ enum rtmutex_chainwalk chwalk);
++/*
++ * Slow path lock function spin_lock style: this variant is very
++ * careful not to miss any non-lock wakeups.
++ *
++ * We store the current state under p->pi_lock in p->saved_state and
++ * the try_to_wake_up() code handles this accordingly.
++ */
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++{
++ struct task_struct *lock_owner, *self = current;
++ struct rt_mutex_waiter waiter, *top_waiter;
++ int ret;
++
++ rt_mutex_init_waiter(&waiter, true);
++
++ raw_spin_lock(&lock->wait_lock);
++
++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
++ raw_spin_unlock(&lock->wait_lock);
++ return;
++ }
++
++ BUG_ON(rt_mutex_owner(lock) == self);
++
++ /*
++ * We save whatever state the task is in and we'll restore it
++ * after acquiring the lock taking real wakeups into account
++ * as well. We are serialized via pi_lock against wakeups. See
++ * try_to_wake_up().
++ */
++ pi_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ pi_unlock(&self->pi_lock);
++
++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
++ BUG_ON(ret);
++
++ for (;;) {
++ /* Try to acquire the lock again. */
++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
++ break;
++
++ top_waiter = rt_mutex_top_waiter(lock);
++ lock_owner = rt_mutex_owner(lock);
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ debug_rt_mutex_print_deadlock(&waiter);
++
++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
++ schedule_rt_mutex(lock);
++
++ raw_spin_lock(&lock->wait_lock);
++
++ pi_lock(&self->pi_lock);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ pi_unlock(&self->pi_lock);
++ }
++
++ /*
++ * Restore the task state to current->saved_state. We set it
++ * to the original state above and the try_to_wake_up() code
++ * has possibly updated it when a real (non-rtmutex) wakeup
++ * happened while we were blocked. Clear saved_state so
++ * try_to_wake_up() does not get confused.
++ */
++ pi_lock(&self->pi_lock);
++ __set_current_state(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ pi_unlock(&self->pi_lock);
++
++ /*
++ * try_to_take_rt_mutex() sets the waiter bit
++ * unconditionally. We might have to fix that up:
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
++ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++static void wakeup_next_waiter(struct rt_mutex *lock);
++/*
++ * Slow path to release a rt_mutex spin_lock style
++ */
++static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++ debug_rt_mutex_unlock(lock);
++
++ rt_mutex_deadlock_account_unlock(current);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ raw_spin_unlock(&lock->wait_lock);
++ return;
++ }
++
++ wakeup_next_waiter(lock);
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ /* Undo pi boosting when necessary */
++ rt_mutex_adjust_prio(current);
++}
++
++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++ raw_spin_lock(&lock->wait_lock);
++ __rt_spin_lock_slowunlock(lock);
++}
++
++static void noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
++{
++ int ret;
++
++ do {
++ ret = raw_spin_trylock(&lock->wait_lock);
++ } while (!ret);
++
++ __rt_spin_lock_slowunlock(lock);
++}
++
++void __lockfunc rt_spin_lock(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock);
++
++void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock_nested);
++#endif
++
++void __lockfunc rt_spin_unlock(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock);
++
++void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
++}
++
++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(__rt_spin_unlock);
++
++/*
++ * Wait for the lock to get unlocked: instead of polling for an unlock
++ * (like raw spinlocks do), we lock and unlock, to force the kernel to
++ * schedule if there's contention:
++ */
++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
++{
++ spin_lock(lock);
++ spin_unlock(lock);
++}
++EXPORT_SYMBOL(rt_spin_unlock_wait);
++
++int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
++{
++ return rt_mutex_trylock(lock);
++}
++
++int __lockfunc rt_spin_trylock(spinlock_t *lock)
++{
++ int ret = rt_mutex_trylock(&lock->lock);
++
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock);
++
++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
++{
++ int ret;
++
++ local_bh_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ migrate_disable();
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ } else
++ local_bh_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_bh);
++
++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ migrate_disable();
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_irqsave);
++
++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
++{
++ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
++ if (atomic_add_unless(atomic, -1, 1))
++ return 0;
++ migrate_disable();
++ rt_spin_lock(lock);
++ if (atomic_dec_and_test(atomic))
++ return 1;
++ rt_spin_unlock(lock);
++ migrate_enable();
++ return 0;
++}
++EXPORT_SYMBOL(atomic_dec_and_spin_lock);
++
++void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++}
++EXPORT_SYMBOL(__rt_spin_lock_init);
++
++#endif /* PREEMPT_RT_FULL */
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int __sched
++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
++
++ if (!hold_ctx)
++ return 0;
++
++ if (unlikely(ctx == hold_ctx))
++ return -EALREADY;
++
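++ /*
++ * Wrap-safe test that @ctx is younger than the current holder;
++ * the younger context has to back off with -EDEADLK.
++ */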
++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
++ ctx->contending_lock = ww;
++#endif
++ return -EDEADLK;
++ }
++
++ return 0;
++}
++#else
++static inline int __sched
++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ BUG();
++ return 0;
++}
++
++#endif
++
++static inline int
++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter)
++{
++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
++}
++
+ /*
+ * Task blocks on lock.
+ *
+@@ -894,6 +1299,23 @@
+ return -EDEADLK;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
++
++ /*
++ * In the case of futex requeue PI, this will be a proxy
++ * lock. The task will wake unaware that it is enqueued on
++ * this lock. Avoid blocking on two locks and corrupting
++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
++ * flag. futex_wait_requeue_pi() sets this when it wakes up
++ * before requeue (due to a signal or timeout). Do not enqueue
++ * the task if PI_WAKEUP_INPROGRESS is set.
++ */
++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ return -EAGAIN;
++ }
++
++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
++
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;
+ waiter->lock = lock;
+@@ -917,7 +1339,7 @@
+ rt_mutex_enqueue_pi(owner, waiter);
+
+ __rt_mutex_adjust_prio(owner);
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+ chain_walk = 1;
+@@ -994,7 +1416,7 @@
+ * long as we hold lock->wait_lock. The waiter task needs to
+ * acquire it in order to dequeue the waiter.
+ */
+- wake_up_process(waiter->task);
++ rt_mutex_wake_waiter(waiter);
+ }
+
+ /*
+@@ -1008,7 +1430,7 @@
+ {
+ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
+ struct task_struct *owner = rt_mutex_owner(lock);
+- struct rt_mutex *next_lock;
++ struct rt_mutex *next_lock = NULL;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
+@@ -1033,7 +1455,8 @@
+ __rt_mutex_adjust_prio(owner);
+
+ /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
++ next_lock = task_blocked_on_lock(owner);
+
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+
+@@ -1069,17 +1492,17 @@
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || (waiter->prio == task->prio &&
++ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
+ !dl_prio(task->prio))) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+ next_lock = waiter->lock;
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+ next_lock, NULL, task);
+ }
+@@ -1097,7 +1520,8 @@
+ static int __sched
+ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- struct rt_mutex_waiter *waiter)
++ struct rt_mutex_waiter *waiter,
++ struct ww_acquire_ctx *ww_ctx)
+ {
+ int ret = 0;
+
+@@ -1120,6 +1544,12 @@
+ break;
+ }
+
++ if (ww_ctx && ww_ctx->acquired > 0) {
++ ret = __mutex_lock_check_stamp(lock, ww_ctx);
++ if (ret)
++ break;
++ }
++
+ raw_spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(waiter);
+@@ -1153,25 +1583,102 @@
+ }
+ }
+
++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
++ struct ww_acquire_ctx *ww_ctx)
++{
++#ifdef CONFIG_DEBUG_MUTEXES
++ /*
++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
++ * but released with a normal mutex_unlock in this call.
++ *
++ * This should never happen, always use ww_mutex_unlock.
++ */
++ DEBUG_LOCKS_WARN_ON(ww->ctx);
++
++ /*
++ * Not quite done after calling ww_acquire_done() ?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
++
++ if (ww_ctx->contending_lock) {
++ /*
++ * After -EDEADLK you tried to
++ * acquire a different ww_mutex? Bad!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
++
++ /*
++ * You called ww_mutex_lock after receiving -EDEADLK,
++ * but 'forgot' to unlock everything else first?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
++ ww_ctx->contending_lock = NULL;
++ }
++
++ /*
++ * Naughty, using a different class will lead to undefined behavior!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
++#endif
++ ww_ctx->acquired++;
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct rt_mutex_waiter *waiter, *n;
++
++ /*
++ * This branch gets optimized out for the common case,
++ * and is only important for ww_mutex_lock.
++ */
++ ww_mutex_lock_acquired(ww, ww_ctx);
++ ww->ctx = ww_ctx;
++
++ /*
++ * Give any possible sleeping processes the chance to wake up,
++ * so they can recheck if they have to back off.
++ */
++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
++ tree_entry) {
++ /* XXX debug rt mutex waiter wakeup */
++
++ BUG_ON(waiter->lock != lock);
++ rt_mutex_wake_waiter(waiter);
++ }
++}
++
++#else
++
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ BUG();
++}
++#endif
++
+ /*
+ * Slow path lock function:
+ */
+ static int __sched
+ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk)
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx)
+ {
+ struct rt_mutex_waiter waiter;
+ int ret = 0;
+
+- debug_rt_mutex_init_waiter(&waiter);
+- RB_CLEAR_NODE(&waiter.pi_tree_entry);
+- RB_CLEAR_NODE(&waiter.tree_entry);
++ rt_mutex_init_waiter(&waiter, false);
+
+ raw_spin_lock(&lock->wait_lock);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
++ if (ww_ctx)
++ ww_mutex_account_lock(lock, ww_ctx);
+ raw_spin_unlock(&lock->wait_lock);
+ return 0;
+ }
+@@ -1188,13 +1695,17 @@
+ ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+
+ if (likely(!ret))
+- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
++ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
++ ww_ctx);
+
+ set_current_state(TASK_RUNNING);
+
+ if (unlikely(ret)) {
+- remove_waiter(lock, &waiter);
++ if (rt_mutex_has_waiters(lock))
++ remove_waiter(lock, &waiter);
+ rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++ } else if (ww_ctx) {
++ ww_mutex_account_lock(lock, ww_ctx);
+ }
+
+ /*
+@@ -1233,7 +1744,8 @@
+ * The mutex has currently no owner. Lock the wait lock and
+ * try to acquire the lock.
+ */
+- raw_spin_lock(&lock->wait_lock);
++ if (!raw_spin_trylock(&lock->wait_lock))
++ return 0;
+
+ ret = try_to_take_rt_mutex(lock, current, NULL);
+
+@@ -1319,31 +1831,36 @@
+ */
+ static inline int
+ rt_mutex_fastlock(struct rt_mutex *lock, int state,
++ struct ww_acquire_ctx *ww_ctx,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx))
+ {
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
++ ww_ctx);
+ }
+
+ static inline int
+ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx))
+ {
+ if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+ likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+- return slowfn(lock, state, timeout, chwalk);
++ return slowfn(lock, state, timeout, chwalk, ww_ctx);
+ }
+
+ static inline int
+@@ -1376,7 +1893,7 @@
+ {
+ might_sleep();
+
+- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
++ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+
+@@ -1393,7 +1910,7 @@
+ {
+ might_sleep();
+
+- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
++ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+@@ -1406,11 +1923,30 @@
+ might_sleep();
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+- RT_MUTEX_FULL_CHAINWALK,
++ RT_MUTEX_FULL_CHAINWALK, NULL,
+ rt_mutex_slowlock);
+ }
+
+ /**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock: the rt_mutex to be locked
++ *
++ * Returns:
++ * 0 on success
++ * -EINTR when interrupted by a signal
++ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
++{
++ might_sleep();
++
++ return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
++/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ * the timeout structure is provided
+ * by the caller
+@@ -1430,6 +1966,7 @@
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+ RT_MUTEX_MIN_CHAINWALK,
++ NULL,
+ rt_mutex_slowlock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1488,13 +2025,12 @@
+ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+ {
+ lock->owner = NULL;
+- raw_spin_lock_init(&lock->wait_lock);
+ lock->waiters = RB_ROOT;
+ lock->waiters_leftmost = NULL;
+
+ debug_rt_mutex_init(lock, name);
+ }
+-EXPORT_SYMBOL_GPL(__rt_mutex_init);
++EXPORT_SYMBOL(__rt_mutex_init);
+
+ /**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+@@ -1509,7 +2045,7 @@
+ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+ {
+- __rt_mutex_init(lock, NULL);
++ rt_mutex_init(lock);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+ rt_mutex_deadlock_account_lock(lock, proxy_owner);
+@@ -1557,6 +2093,35 @@
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * In PREEMPT_RT there's an added race.
++ * If the task that we are about to requeue times out,
++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
++ * to skip this task. But right after the task sets
++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
++ * This will replace the PI_WAKEUP_INPROGRESS with the actual
++ * lock that it blocks on. We *must not* place this task
++ * on this proxy lock in that case.
++ *
++ * To prevent this race, we first take the task's pi_lock
++ * and check if it has updated its pi_blocked_on. If it has,
++ * we assume that it woke up and we return -EAGAIN.
++ * Otherwise, we set the task's pi_blocked_on to
++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
++ * it will know that we are in the process of requeuing it.
++ */
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->pi_blocked_on) {
++ raw_spin_unlock_irq(&task->pi_lock);
++ raw_spin_unlock(&lock->wait_lock);
++ return -EAGAIN;
++ }
++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
++ raw_spin_unlock_irq(&task->pi_lock);
++#endif
++
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
+ RT_MUTEX_FULL_CHAINWALK);
+@@ -1626,7 +2191,7 @@
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+
+ set_current_state(TASK_RUNNING);
+
+@@ -1643,3 +2208,89 @@
+
+ return ret;
+ }
++
++static inline int
++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
++ unsigned tmp;
++
++ if (ctx->deadlock_inject_countdown-- == 0) {
++ tmp = ctx->deadlock_inject_interval;
++ if (tmp > UINT_MAX/4)
++ tmp = UINT_MAX;
++ else
++ tmp = tmp*2 + tmp + tmp/2;
++
++ ctx->deadlock_inject_interval = tmp;
++ ctx->deadlock_inject_countdown = tmp;
++ ctx->contending_lock = lock;
++
++ ww_mutex_unlock(lock);
++
++ return -EDEADLK;
++ }
++#endif
++
++ return 0;
++}
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++int __sched
++__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL,
++ RT_MUTEX_FULL_CHAINWALK, ww_ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ else if (!ret && ww_ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ww_ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
++
++int __sched
++__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL,
++ RT_MUTEX_FULL_CHAINWALK, ww_ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ else if (!ret && ww_ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ww_ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__ww_mutex_lock);
++
++void __sched ww_mutex_unlock(struct ww_mutex *lock)
++{
++ /*
++ * The unlocking fastpath is the 0->1 transition from 'locked'
++ * into 'unlocked' state:
++ */
++ if (lock->ctx) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
++#endif
++ if (lock->ctx->acquired > 0)
++ lock->ctx->acquired--;
++ lock->ctx = NULL;
++ }
++
++ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&lock->base.lock);
++}
++EXPORT_SYMBOL(ww_mutex_unlock);
++#endif
+diff -Nur linux-3.18.8.orig/kernel/locking/rtmutex_common.h linux-3.18.8/kernel/locking/rtmutex_common.h
+--- linux-3.18.8.orig/kernel/locking/rtmutex_common.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/rtmutex_common.h 2015-03-03 08:05:18.000000000 +0100
+@@ -49,6 +49,7 @@
+ struct rb_node pi_tree_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
++ bool savestate;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ struct pid *deadlock_task_pid;
+@@ -119,6 +120,9 @@
+ /*
+ * PI-futex support (proxy locking functions, etc.):
+ */
++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+@@ -138,4 +142,14 @@
+ # include "rtmutex.h"
+ #endif
+
++static inline void
++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
++{
++ debug_rt_mutex_init_waiter(waiter);
++ waiter->task = NULL;
++ waiter->savestate = savestate;
++ RB_CLEAR_NODE(&waiter->pi_tree_entry);
++ RB_CLEAR_NODE(&waiter->tree_entry);
++}
++
+ #endif
+diff -Nur linux-3.18.8.orig/kernel/locking/spinlock.c linux-3.18.8/kernel/locking/spinlock.c
+--- linux-3.18.8.orig/kernel/locking/spinlock.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/spinlock.c 2015-03-03 08:05:18.000000000 +0100
+@@ -124,8 +124,11 @@
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+
+ #endif
+
+@@ -209,6 +212,8 @@
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -353,6 +358,8 @@
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+diff -Nur linux-3.18.8.orig/kernel/locking/spinlock_debug.c linux-3.18.8/kernel/locking/spinlock_debug.c
+--- linux-3.18.8.orig/kernel/locking/spinlock_debug.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/locking/spinlock_debug.c 2015-03-03 08:05:18.000000000 +0100
+@@ -31,6 +31,7 @@
+
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@
+ }
+
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -159,6 +161,7 @@
+ arch_spin_unlock(&lock->raw_lock);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -300,3 +303,5 @@
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
+diff -Nur linux-3.18.8.orig/kernel/panic.c linux-3.18.8/kernel/panic.c
+--- linux-3.18.8.orig/kernel/panic.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/panic.c 2015-03-03 08:05:18.000000000 +0100
+@@ -384,9 +384,11 @@
+
+ static int init_oops_id(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+ else
++#endif
+ oops_id++;
+
+ return 0;
+diff -Nur linux-3.18.8.orig/kernel/power/hibernate.c linux-3.18.8/kernel/power/hibernate.c
+--- linux-3.18.8.orig/kernel/power/hibernate.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/power/hibernate.c 2015-03-03 08:05:18.000000000 +0100
+@@ -287,6 +287,8 @@
+
+ local_irq_disable();
+
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (error) {
+ printk(KERN_ERR "PM: Some system devices failed to power down, "
+@@ -316,6 +318,7 @@
+ syscore_resume();
+
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -439,6 +442,7 @@
+ goto Enable_cpus;
+
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+
+ error = syscore_suspend();
+ if (error)
+@@ -472,6 +476,7 @@
+ syscore_resume();
+
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -557,6 +562,7 @@
+ goto Platform_finish;
+
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+ syscore_suspend();
+ if (pm_wakeup_pending()) {
+ error = -EAGAIN;
+@@ -569,6 +575,7 @@
+
+ Power_up:
+ syscore_resume();
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+ enable_nonboot_cpus();
+
+diff -Nur linux-3.18.8.orig/kernel/power/suspend.c linux-3.18.8/kernel/power/suspend.c
+--- linux-3.18.8.orig/kernel/power/suspend.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/power/suspend.c 2015-03-03 08:05:18.000000000 +0100
+@@ -318,6 +318,8 @@
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
+
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (!error) {
+ *wakeup = pm_wakeup_pending();
+@@ -332,6 +334,8 @@
+ syscore_resume();
+ }
+
++ system_state = SYSTEM_RUNNING;
++
+ arch_suspend_enable_irqs();
+ BUG_ON(irqs_disabled());
+
+diff -Nur linux-3.18.8.orig/kernel/printk/printk.c linux-3.18.8/kernel/printk/printk.c
+--- linux-3.18.8.orig/kernel/printk/printk.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/printk/printk.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1165,6 +1165,7 @@
+ {
+ char *text;
+ int len = 0;
++ int attempts = 0;
+
+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+ if (!text)
+@@ -1176,7 +1177,14 @@
+ u64 seq;
+ u32 idx;
+ enum log_flags prev;
+-
++ int num_msg;
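++ /*
++ * Drop logbuf_lock every few records to bound the irqs-off
++ * section; start over if the log was overwritten meanwhile.
++ */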
++try_again:
++ attempts++;
++ if (attempts > 10) {
++ len = -EBUSY;
++ goto out;
++ }
++ num_msg = 0;
+ if (clear_seq < log_first_seq) {
+ /* messages are gone, move to first available one */
+ clear_seq = log_first_seq;
+@@ -1197,6 +1205,14 @@
+ prev = msg->flags;
+ idx = log_next(idx);
+ seq++;
++ num_msg++;
++ if (num_msg > 5) {
++ num_msg = 0;
++ raw_spin_unlock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
++ if (clear_seq < log_first_seq)
++ goto try_again;
++ }
+ }
+
+ /* move first record forward until length fits into the buffer */
+@@ -1210,6 +1226,14 @@
+ prev = msg->flags;
+ idx = log_next(idx);
+ seq++;
++ num_msg++;
++ if (num_msg > 5) {
++ num_msg = 0;
++ raw_spin_unlock_irq(&logbuf_lock);
++ raw_spin_lock_irq(&logbuf_lock);
++ if (clear_seq < log_first_seq)
++ goto try_again;
++ }
+ }
+
+ /* last message fitting into this dump */
+@@ -1250,6 +1274,7 @@
+ clear_seq = log_next_seq;
+ clear_idx = log_next_idx;
+ }
++out:
+ raw_spin_unlock_irq(&logbuf_lock);
+
+ kfree(text);
+@@ -1407,6 +1432,7 @@
+ if (!console_drivers)
+ return;
+
++ migrate_disable();
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -1419,6 +1445,7 @@
+ continue;
+ con->write(con, text, len);
+ }
++ migrate_enable();
+ }
+
+ /*
+@@ -1479,6 +1506,15 @@
+ static int console_trylock_for_printk(void)
+ {
+ unsigned int cpu = smp_processor_id();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int lock = !early_boot_irqs_disabled && (preempt_count() == 0) &&
++ !irqs_disabled();
++#else
++ int lock = 1;
++#endif
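++ /*
++ * On RT, only print directly from preemptible, irqs-on context;
++ * console drivers may take sleeping locks.
++ */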
++
++ if (!lock)
++ return 0;
+
+ if (!console_trylock())
+ return 0;
+@@ -1613,6 +1649,62 @@
+ return textlen;
+ }
+
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++void early_vprintk(const char *fmt, va_list ap)
++{
++ if (early_console) {
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++
++ early_console->write(early_console, buf, n);
++ }
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
++}
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static bool __read_mostly printk_killswitch;
++
++static int __init force_early_printk_setup(char *str)
++{
++ printk_killswitch = true;
++ return 0;
++}
++early_param("force_early_printk", force_early_printk_setup);
++
++void printk_kill(void)
++{
++ printk_killswitch = true;
++}
++
++static int forced_early_printk(const char *fmt, va_list ap)
++{
++ if (!printk_killswitch)
++ return 0;
++ early_vprintk(fmt, ap);
++ return 1;
++}
++#else
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++ return 0;
++}
++#endif
++
+ asmlinkage int vprintk_emit(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, va_list args)
+@@ -1629,6 +1721,13 @@
+ /* cpu currently holding logbuf_lock in this function */
+ static volatile unsigned int logbuf_cpu = UINT_MAX;
+
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ if (level == SCHED_MESSAGE_LOGLEVEL) {
+ level = -1;
+ in_sched = true;
+@@ -1769,8 +1868,7 @@
+ * console_sem which would prevent anyone from printing to
+ * console
+ */
+- preempt_disable();
+-
++ migrate_disable();
+ /*
+ * Try to acquire and then immediately release the console
+ * semaphore. The release will print out buffers and wake up
+@@ -1778,7 +1876,7 @@
+ */
+ if (console_trylock_for_printk())
+ console_unlock();
+- preempt_enable();
++ migrate_enable();
+ lockdep_on();
+ }
+
+@@ -1878,29 +1976,6 @@
+
+ #endif /* CONFIG_PRINTK */
+
+-#ifdef CONFIG_EARLY_PRINTK
+-struct console *early_console;
+-
+-void early_vprintk(const char *fmt, va_list ap)
+-{
+- if (early_console) {
+- char buf[512];
+- int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+-
+- early_console->write(early_console, buf, n);
+- }
+-}
+-
+-asmlinkage __visible void early_printk(const char *fmt, ...)
+-{
+- va_list ap;
+-
+- va_start(ap, fmt);
+- early_vprintk(fmt, ap);
+- va_end(ap);
+-}
+-#endif
+-
+ static int __add_preferred_console(char *name, int idx, char *options,
+ char *brl_options)
+ {
+@@ -2140,11 +2215,16 @@
+ goto out;
+
+ len = cont_print_text(text, size);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings();
+ call_console_drivers(cont.level, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++#else
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(cont.level, text, len);
++#endif
+ return;
+ out:
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+@@ -2232,12 +2312,17 @@
+ console_idx = log_next(console_idx);
+ console_seq++;
+ console_prev = msg->flags;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(level, text, len);
++#else
+ raw_spin_unlock(&logbuf_lock);
+
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(level, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++#endif
+ }
+ console_locked = 0;
+
+diff -Nur linux-3.18.8.orig/kernel/ptrace.c linux-3.18.8/kernel/ptrace.c
+--- linux-3.18.8.orig/kernel/ptrace.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/ptrace.c 2015-03-03 08:05:18.000000000 +0100
+@@ -129,7 +129,12 @@
+
+ spin_lock_irq(&task->sighand->siglock);
+ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+- task->state = __TASK_TRACED;
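++ /*
++ * On RT the task may be blocked on a sleeping spinlock with
++ * its real state stashed in saved_state; set __TASK_TRACED in
++ * whichever field currently holds it.
++ */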
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->state & __TASK_TRACED)
++ task->state = __TASK_TRACED;
++ else
++ task->saved_state = __TASK_TRACED;
++ raw_spin_unlock_irq(&task->pi_lock);
+ ret = true;
+ }
+ spin_unlock_irq(&task->sighand->siglock);
+diff -Nur linux-3.18.8.orig/kernel/rcu/tiny.c linux-3.18.8/kernel/rcu/tiny.c
+--- linux-3.18.8.orig/kernel/rcu/tiny.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/rcu/tiny.c 2015-03-03 08:05:18.000000000 +0100
+@@ -370,6 +370,7 @@
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Post an RCU bottom-half callback to be invoked after any subsequent
+ * quiescent state.
+@@ -379,6 +380,7 @@
+ __call_rcu(head, func, &rcu_bh_ctrlblk);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+
+ void rcu_init(void)
+ {
+diff -Nur linux-3.18.8.orig/kernel/rcu/tree.c linux-3.18.8/kernel/rcu/tree.c
+--- linux-3.18.8.orig/kernel/rcu/tree.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/rcu/tree.c 2015-03-03 08:05:18.000000000 +0100
+@@ -56,6 +56,11 @@
+ #include <linux/random.h>
+ #include <linux/ftrace_event.h>
+ #include <linux/suspend.h>
++#include <linux/delay.h>
++#include <linux/gfp.h>
++#include <linux/oom.h>
++#include <linux/smpboot.h>
++#include "../time/tick-internal.h"
+
+ #include "tree.h"
+ #include "rcu.h"
+@@ -152,8 +157,6 @@
+ */
+ static int rcu_scheduler_fully_active __read_mostly;
+
+-#ifdef CONFIG_RCU_BOOST
+-
+ /*
+ * Control variables for per-CPU and per-rcu_node kthreads. These
+ * handle all flavors of RCU.
+@@ -163,8 +166,6 @@
+ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+ DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+-#endif /* #ifdef CONFIG_RCU_BOOST */
+-
+ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
+ static void invoke_rcu_core(void);
+ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+@@ -207,6 +208,19 @@
+ }
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void rcu_preempt_qs(void);
++
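++/*
++ * On RT, softirqs run in task context under preemptible RCU, so an
++ * RCU-bh quiescent state maps to a preempt-RCU quiescent state.
++ */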
++void rcu_bh_qs(void)
++{
++ unsigned long flags;
++
++ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */
++ local_irq_save(flags);
++ rcu_preempt_qs();
++ local_irq_restore(flags);
++}
++#else
+ void rcu_bh_qs(void)
+ {
+ if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+@@ -216,6 +230,7 @@
+ __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+ }
+ }
++#endif
+
+ static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+@@ -336,6 +351,7 @@
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+@@ -362,6 +378,7 @@
+ force_quiescent_state(&rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
++#endif
+
+ /*
+ * Show the state of the grace-period kthreads.
+@@ -1411,7 +1428,7 @@
+ !ACCESS_ONCE(rsp->gp_flags) ||
+ !rsp->gp_kthread)
+ return;
+- wake_up(&rsp->gp_wq);
++ swait_wake(&rsp->gp_wq);
+ }
+
+ /*
+@@ -1793,7 +1810,7 @@
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("reqwait"));
+ rsp->gp_state = RCU_GP_WAIT_GPS;
+- wait_event_interruptible(rsp->gp_wq,
++ swait_event_interruptible(rsp->gp_wq,
+ ACCESS_ONCE(rsp->gp_flags) &
+ RCU_GP_FLAG_INIT);
+ /* Locking provides needed memory barrier. */
+@@ -1821,7 +1838,7 @@
+ ACCESS_ONCE(rsp->gpnum),
+ TPS("fqswait"));
+ rsp->gp_state = RCU_GP_WAIT_FQS;
+- ret = wait_event_interruptible_timeout(rsp->gp_wq,
++ ret = swait_event_interruptible_timeout(rsp->gp_wq,
+ ((gf = ACCESS_ONCE(rsp->gp_flags)) &
+ RCU_GP_FLAG_FQS) ||
+ (!ACCESS_ONCE(rnp->qsmask) &&
+@@ -2565,16 +2582,14 @@
+ /*
+ * Do RCU core processing for the current CPU.
+ */
+-static void rcu_process_callbacks(struct softirq_action *unused)
++static void rcu_process_callbacks(void)
+ {
+ struct rcu_state *rsp;
+
+ if (cpu_is_offline(smp_processor_id()))
+ return;
+- trace_rcu_utilization(TPS("Start RCU core"));
+ for_each_rcu_flavor(rsp)
+ __rcu_process_callbacks(rsp);
+- trace_rcu_utilization(TPS("End RCU core"));
+ }
+
+ /*
+@@ -2588,18 +2603,105 @@
+ {
+ if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+ return;
+- if (likely(!rsp->boost)) {
+- rcu_do_batch(rsp, rdp);
++ rcu_do_batch(rsp, rdp);
++}
++
++static void rcu_wake_cond(struct task_struct *t, int status)
++{
++ /*
++ * If the thread is yielding, only wake it when this
++ * is invoked from idle
++ */
++ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
++ wake_up_process(t);
++}
++
++/*
++ * Wake up this CPU's rcuc kthread to do RCU core processing.
++ */
++static void invoke_rcu_core(void)
++{
++ unsigned long flags;
++ struct task_struct *t;
++
++ if (!cpu_online(smp_processor_id()))
+ return;
++ local_irq_save(flags);
++ __this_cpu_write(rcu_cpu_has_work, 1);
++ t = __this_cpu_read(rcu_cpu_kthread_task);
++ if (t != NULL && current != t)
++ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
++ local_irq_restore(flags);
++}
++
++static void rcu_cpu_kthread_park(unsigned int cpu)
++{
++ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
++}
++
++static int rcu_cpu_kthread_should_run(unsigned int cpu)
++{
++ return __this_cpu_read(rcu_cpu_has_work);
++}
++
++/*
++ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
++ * RCU softirq used in flavors and configurations of RCU that do not
++ * support RCU priority boosting.
++ */
++static void rcu_cpu_kthread(unsigned int cpu)
++{
++ unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
++ char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
++ int spincnt;
++
++ for (spincnt = 0; spincnt < 10; spincnt++) {
++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
++ local_bh_disable();
++ *statusp = RCU_KTHREAD_RUNNING;
++ this_cpu_inc(rcu_cpu_kthread_loops);
++ local_irq_disable();
++ work = *workp;
++ *workp = 0;
++ local_irq_enable();
++ if (work)
++ rcu_process_callbacks();
++ local_bh_enable();
++ if (*workp == 0) {
++ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
++ *statusp = RCU_KTHREAD_WAITING;
++ return;
++ }
+ }
+- invoke_rcu_callbacks_kthread();
++ *statusp = RCU_KTHREAD_YIELDING;
++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
++ schedule_timeout_interruptible(2);
++ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
++ *statusp = RCU_KTHREAD_WAITING;
+ }
+
+-static void invoke_rcu_core(void)
++static struct smp_hotplug_thread rcu_cpu_thread_spec = {
++ .store = &rcu_cpu_kthread_task,
++ .thread_should_run = rcu_cpu_kthread_should_run,
++ .thread_fn = rcu_cpu_kthread,
++ .thread_comm = "rcuc/%u",
++ .setup = rcu_cpu_kthread_setup,
++ .park = rcu_cpu_kthread_park,
++};
++
++/*
++ * Spawn per-CPU RCU core processing kthreads.
++ */
++static int __init rcu_spawn_core_kthreads(void)
+ {
+- if (cpu_online(smp_processor_id()))
+- raise_softirq(RCU_SOFTIRQ);
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ per_cpu(rcu_cpu_has_work, cpu) = 0;
++ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
++ return 0;
+ }
++early_initcall(rcu_spawn_core_kthreads);
+
+ /*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+@@ -2734,6 +2836,7 @@
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue an RCU callback for invocation after a quicker grace period.
+ */
+@@ -2742,6 +2845,7 @@
+ __call_rcu(head, func, &rcu_bh_state, -1, 0);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+
+ /*
+ * Queue an RCU callback for lazy invocation after a grace period.
+@@ -2833,6 +2937,7 @@
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+@@ -2859,6 +2964,7 @@
+ wait_rcu_gp(call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
++#endif
+
+ /**
+ * get_state_synchronize_rcu - Snapshot current RCU state
+@@ -3341,6 +3447,7 @@
+ mutex_unlock(&rsp->barrier_mutex);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+@@ -3349,6 +3456,7 @@
+ _rcu_barrier(&rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
++#endif
+
+ /**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+@@ -3658,7 +3766,7 @@
+ }
+
+ rsp->rda = rda;
+- init_waitqueue_head(&rsp->gp_wq);
++ init_swait_head(&rsp->gp_wq);
+ rnp = rsp->level[rcu_num_lvls - 1];
+ for_each_possible_cpu(i) {
+ while (i > rnp->grphi)
+@@ -3755,7 +3863,6 @@
+ rcu_init_one(&rcu_bh_state, &rcu_bh_data);
+ rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+ __rcu_init_preempt();
+- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+
+ /*
+ * We don't need protection against CPU-hotplug here because
+diff -Nur linux-3.18.8.orig/kernel/rcu/tree.h linux-3.18.8/kernel/rcu/tree.h
+--- linux-3.18.8.orig/kernel/rcu/tree.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/rcu/tree.h 2015-03-03 08:05:18.000000000 +0100
+@@ -28,6 +28,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/seqlock.h>
+ #include <linux/irq_work.h>
++#include <linux/wait-simple.h>
+
+ /*
+ * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
+@@ -208,7 +209,7 @@
+ /* This can happen due to race conditions. */
+ #endif /* #ifdef CONFIG_RCU_BOOST */
+ #ifdef CONFIG_RCU_NOCB_CPU
+- wait_queue_head_t nocb_gp_wq[2];
++ struct swait_head nocb_gp_wq[2];
+ /* Place for rcu_nocb_kthread() to wait GP. */
+ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+ int need_future_gp[2];
+@@ -348,7 +349,7 @@
+ atomic_long_t nocb_follower_count_lazy; /* (approximate). */
+ int nocb_p_count; /* # CBs being invoked by kthread */
+ int nocb_p_count_lazy; /* (approximate). */
+- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
++ struct swait_head nocb_wq; /* For nocb kthreads to sleep on. */
+ struct task_struct *nocb_kthread;
+ int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
+
+@@ -439,7 +440,7 @@
+ unsigned long gpnum; /* Current gp number. */
+ unsigned long completed; /* # of last completed gp. */
+ struct task_struct *gp_kthread; /* Task for grace periods. */
+- wait_queue_head_t gp_wq; /* Where GP task waits. */
++ struct swait_head gp_wq; /* Where GP task waits. */
+ short gp_flags; /* Commands for GP task. */
+ short gp_state; /* GP kthread sleep state. */
+
+@@ -570,10 +571,9 @@
+ static void __init __rcu_init_preempt(void);
+ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
+ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
+-static void invoke_rcu_callbacks_kthread(void);
+ static bool rcu_is_callbacks_kthread(void);
++static void rcu_cpu_kthread_setup(unsigned int cpu);
+ #ifdef CONFIG_RCU_BOOST
+-static void rcu_preempt_do_callbacks(void);
+ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+ struct rcu_node *rnp);
+ #endif /* #ifdef CONFIG_RCU_BOOST */
+diff -Nur linux-3.18.8.orig/kernel/rcu/tree_plugin.h linux-3.18.8/kernel/rcu/tree_plugin.h
+--- linux-3.18.8.orig/kernel/rcu/tree_plugin.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/rcu/tree_plugin.h 2015-03-03 08:05:18.000000000 +0100
+@@ -24,12 +24,6 @@
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+-#include <linux/delay.h>
+-#include <linux/gfp.h>
+-#include <linux/oom.h>
+-#include <linux/smpboot.h>
+-#include "../time/tick-internal.h"
+-
+ #define RCU_KTHREAD_PRIO 1
+
+ #ifdef CONFIG_RCU_BOOST
+@@ -335,7 +329,7 @@
+ }
+
+ /* Hardware IRQ handlers cannot block, complain if they get here. */
+- if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
++ if (WARN_ON_ONCE(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET))) {
+ local_irq_restore(flags);
+ return;
+ }
+@@ -635,15 +629,6 @@
+ t->rcu_read_unlock_special.b.need_qs = true;
+ }
+
+-#ifdef CONFIG_RCU_BOOST
+-
+-static void rcu_preempt_do_callbacks(void)
+-{
+- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
+-}
+-
+-#endif /* #ifdef CONFIG_RCU_BOOST */
+-
+ /*
+ * Queue a preemptible-RCU callback for invocation after a grace period.
+ */
+@@ -1072,6 +1057,19 @@
+
+ #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
++/*
++ * If boosting, set rcuc kthreads to realtime priority.
++ */
++static void rcu_cpu_kthread_setup(unsigned int cpu)
++{
++#ifdef CONFIG_RCU_BOOST
++ struct sched_param sp;
++
++ sp.sched_priority = RCU_KTHREAD_PRIO;
++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
++#endif /* #ifdef CONFIG_RCU_BOOST */
++}
++
+ #ifdef CONFIG_RCU_BOOST
+
+ #include "../locking/rtmutex_common.h"
+@@ -1103,16 +1101,6 @@
+
+ #endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+-static void rcu_wake_cond(struct task_struct *t, int status)
+-{
+- /*
+- * If the thread is yielding, only wake it when this
+- * is invoked from idle
+- */
+- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+- wake_up_process(t);
+-}
+-
+ /*
+ * Carry out RCU priority boosting on the task indicated by ->exp_tasks
+ * or ->boost_tasks, advancing the pointer to the next task in the
+@@ -1261,23 +1249,6 @@
+ }
+
+ /*
+- * Wake up the per-CPU kthread to invoke RCU callbacks.
+- */
+-static void invoke_rcu_callbacks_kthread(void)
+-{
+- unsigned long flags;
+-
+- local_irq_save(flags);
+- __this_cpu_write(rcu_cpu_has_work, 1);
+- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+- current != __this_cpu_read(rcu_cpu_kthread_task)) {
+- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+- __this_cpu_read(rcu_cpu_kthread_status));
+- }
+- local_irq_restore(flags);
+-}
+-
+-/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+@@ -1332,67 +1303,6 @@
+ return 0;
+ }
+
+-static void rcu_kthread_do_work(void)
+-{
+- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
+- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
+- rcu_preempt_do_callbacks();
+-}
+-
+-static void rcu_cpu_kthread_setup(unsigned int cpu)
+-{
+- struct sched_param sp;
+-
+- sp.sched_priority = RCU_KTHREAD_PRIO;
+- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+-}
+-
+-static void rcu_cpu_kthread_park(unsigned int cpu)
+-{
+- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+-}
+-
+-static int rcu_cpu_kthread_should_run(unsigned int cpu)
+-{
+- return __this_cpu_read(rcu_cpu_has_work);
+-}
+-
+-/*
+- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+- * RCU softirq used in flavors and configurations of RCU that do not
+- * support RCU priority boosting.
+- */
+-static void rcu_cpu_kthread(unsigned int cpu)
+-{
+- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
+- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
+- int spincnt;
+-
+- for (spincnt = 0; spincnt < 10; spincnt++) {
+- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+- local_bh_disable();
+- *statusp = RCU_KTHREAD_RUNNING;
+- this_cpu_inc(rcu_cpu_kthread_loops);
+- local_irq_disable();
+- work = *workp;
+- *workp = 0;
+- local_irq_enable();
+- if (work)
+- rcu_kthread_do_work();
+- local_bh_enable();
+- if (*workp == 0) {
+- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+- *statusp = RCU_KTHREAD_WAITING;
+- return;
+- }
+- }
+- *statusp = RCU_KTHREAD_YIELDING;
+- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+- schedule_timeout_interruptible(2);
+- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+- *statusp = RCU_KTHREAD_WAITING;
+-}
+-
+ /*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+@@ -1426,26 +1336,13 @@
+ free_cpumask_var(cm);
+ }
+
+-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+- .store = &rcu_cpu_kthread_task,
+- .thread_should_run = rcu_cpu_kthread_should_run,
+- .thread_fn = rcu_cpu_kthread,
+- .thread_comm = "rcuc/%u",
+- .setup = rcu_cpu_kthread_setup,
+- .park = rcu_cpu_kthread_park,
+-};
+-
+ /*
+ * Spawn boost kthreads -- called as soon as the scheduler is running.
+ */
+ static void __init rcu_spawn_boost_kthreads(void)
+ {
+ struct rcu_node *rnp;
+- int cpu;
+
+- for_each_possible_cpu(cpu)
+- per_cpu(rcu_cpu_has_work, cpu) = 0;
+- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ rnp = rcu_get_root(rcu_state_p);
+ (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
+ if (NUM_RCU_NODES > 1) {
+@@ -1472,11 +1369,6 @@
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ }
+
+-static void invoke_rcu_callbacks_kthread(void)
+-{
+- WARN_ON_ONCE(1);
+-}
+-
+ static bool rcu_is_callbacks_kthread(void)
+ {
+ return false;
+@@ -1500,7 +1392,7 @@
+
+ #endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+-#if !defined(CONFIG_RCU_FAST_NO_HZ)
++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
+
+ /*
+ * Check to see if any future RCU-related work will need to be done
+@@ -1518,7 +1410,9 @@
+ return rcu_cpu_has_callbacks(cpu, NULL);
+ }
+ #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+
++#if !defined(CONFIG_RCU_FAST_NO_HZ)
+ /*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+@@ -1615,6 +1509,8 @@
+ return cbs_ready;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ /*
+ * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
+ * to invoke. If the CPU has callbacks, try to advance them. Tell the
+@@ -1655,7 +1551,7 @@
+ return 0;
+ }
+ #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+-
++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
+ /*
+ * Prepare a CPU for idle from an RCU perspective. The first major task
+ * is to sense whether nohz mode has been enabled or disabled via sysfs.
+@@ -2001,7 +1897,7 @@
+ */
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+ {
+- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
++ swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ }
+
+ /*
+@@ -2019,8 +1915,8 @@
+
+ static void rcu_init_one_nocb(struct rcu_node *rnp)
+ {
+- init_waitqueue_head(&rnp->nocb_gp_wq[0]);
+- init_waitqueue_head(&rnp->nocb_gp_wq[1]);
++ init_swait_head(&rnp->nocb_gp_wq[0]);
++ init_swait_head(&rnp->nocb_gp_wq[1]);
+ }
+
+ #ifndef CONFIG_RCU_NOCB_CPU_ALL
+@@ -2045,7 +1941,7 @@
+ if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+ /* Prior smp_mb__after_atomic() orders against prior enqueue. */
+ ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
+- wake_up(&rdp_leader->nocb_wq);
++ swait_wake(&rdp_leader->nocb_wq);
+ }
+ }
+
+@@ -2238,7 +2134,7 @@
+ */
+ trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
+ for (;;) {
+- wait_event_interruptible(
++ swait_event_interruptible(
+ rnp->nocb_gp_wq[c & 0x1],
+ (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
+ if (likely(d))
+@@ -2266,7 +2162,7 @@
+ /* Wait for callbacks to appear. */
+ if (!rcu_nocb_poll) {
+ trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
+- wait_event_interruptible(my_rdp->nocb_wq,
++ swait_event_interruptible(my_rdp->nocb_wq,
+ !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
+ /* Memory barrier handled by smp_mb() calls below and repoll. */
+ } else if (firsttime) {
+@@ -2347,7 +2243,7 @@
+ * List was empty, wake up the follower.
+ * Memory barriers supplied by atomic_long_add().
+ */
+- wake_up(&rdp->nocb_wq);
++ swait_wake(&rdp->nocb_wq);
+ }
+ }
+
+@@ -2368,7 +2264,7 @@
+ if (!rcu_nocb_poll) {
+ trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+ "FollowerSleep");
+- wait_event_interruptible(rdp->nocb_wq,
++ swait_event_interruptible(rdp->nocb_wq,
+ ACCESS_ONCE(rdp->nocb_follower_head));
+ } else if (firsttime) {
+ /* Don't drown trace log with "Poll"! */
+@@ -2539,7 +2435,7 @@
+ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+ {
+ rdp->nocb_tail = &rdp->nocb_head;
+- init_waitqueue_head(&rdp->nocb_wq);
++ init_swait_head(&rdp->nocb_wq);
+ rdp->nocb_follower_tail = &rdp->nocb_follower_head;
+ }
+
+diff -Nur linux-3.18.8.orig/kernel/rcu/update.c linux-3.18.8/kernel/rcu/update.c
+--- linux-3.18.8.orig/kernel/rcu/update.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/rcu/update.c 2015-03-03 08:05:18.000000000 +0100
+@@ -170,6 +170,7 @@
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
+ *
+@@ -196,6 +197,7 @@
+ return in_softirq() || irqs_disabled();
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
++#endif
+
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+diff -Nur linux-3.18.8.orig/kernel/relay.c linux-3.18.8/kernel/relay.c
+--- linux-3.18.8.orig/kernel/relay.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/relay.c 2015-03-03 08:05:18.000000000 +0100
+@@ -339,6 +339,10 @@
+ {
+ struct rchan_buf *buf = (struct rchan_buf *)data;
+ wake_up_interruptible(&buf->read_wait);
++ /*
++ * Stupid polling for now:
++ */
++ mod_timer(&buf->timer, jiffies + 1);
+ }
+
+ /**
+@@ -356,6 +360,7 @@
+ init_waitqueue_head(&buf->read_wait);
+ kref_init(&buf->kref);
+ setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
++ mod_timer(&buf->timer, jiffies + 1);
+ } else
+ del_timer_sync(&buf->timer);
+
+@@ -739,15 +744,6 @@
+ else
+ buf->early_bytes += buf->chan->subbuf_size -
+ buf->padding[old_subbuf];
+- smp_mb();
+- if (waitqueue_active(&buf->read_wait))
+- /*
+- * Calling wake_up_interruptible() from here
+- * will deadlock if we happen to be logging
+- * from the scheduler (trying to re-grab
+- * rq->lock), so defer it.
+- */
+- mod_timer(&buf->timer, jiffies + 1);
+ }
+
+ old = buf->data;
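With the waitqueue_active() check above removed (waking readers from relay_switch_subbuf() can deadlock on rq->lock when relay is used to log from inside the scheduler), the buffer timer now simply re-arms itself every jiffy, as the "stupid polling" comment admits. A sketch of that self-rearming pattern with the 3.18-era timer API:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_readers(unsigned long data)
{
	/* wake any readers here, then unconditionally re-arm */
	mod_timer(&poll_timer, jiffies + 1);
}

static void poll_start(void)
{
	setup_timer(&poll_timer, poll_readers, 0UL);
	mod_timer(&poll_timer, jiffies + 1);
}

The cost is one timer firing per buffer per tick; the win is that no wakeup is ever issued from a context that may hold scheduler locks.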
+diff -Nur linux-3.18.8.orig/kernel/res_counter.c linux-3.18.8/kernel/res_counter.c
+--- linux-3.18.8.orig/kernel/res_counter.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/res_counter.c 2015-03-03 08:05:18.000000000 +0100
+@@ -59,7 +59,7 @@
+
+ r = ret = 0;
+ *limit_fail_at = NULL;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ for (c = counter; c != NULL; c = c->parent) {
+ spin_lock(&c->lock);
+ r = res_counter_charge_locked(c, val, force);
+@@ -79,7 +79,7 @@
+ spin_unlock(&u->lock);
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ return ret;
+ }
+@@ -104,7 +104,7 @@
+ struct res_counter *c;
+ u64 ret = 0;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ for (c = counter; c != top; c = c->parent) {
+ u64 r;
+ spin_lock(&c->lock);
+@@ -113,7 +113,7 @@
+ ret = r;
+ spin_unlock(&c->lock);
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return ret;
+ }
+
+diff -Nur linux-3.18.8.orig/kernel/sched/completion.c linux-3.18.8/kernel/sched/completion.c
+--- linux-3.18.8.orig/kernel/sched/completion.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/completion.c 2015-03-03 08:05:18.000000000 +0100
+@@ -30,10 +30,10 @@
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ __swait_wake_locked(&x->wait, TASK_NORMAL, 1);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete);
+
+@@ -50,10 +50,10 @@
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ x->done += UINT_MAX/2;
+- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ __swait_wake_locked(&x->wait, TASK_NORMAL, 0);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ EXPORT_SYMBOL(complete_all);
+
+@@ -62,20 +62,20 @@
+ long (*action)(long), long timeout, int state)
+ {
+ if (!x->done) {
+- DECLARE_WAITQUEUE(wait, current);
++ DEFINE_SWAITER(wait);
+
+- __add_wait_queue_tail_exclusive(&x->wait, &wait);
++ swait_prepare_locked(&x->wait, &wait);
+ do {
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
+ break;
+ }
+ __set_current_state(state);
+- spin_unlock_irq(&x->wait.lock);
++ raw_spin_unlock_irq(&x->wait.lock);
+ timeout = action(timeout);
+- spin_lock_irq(&x->wait.lock);
++ raw_spin_lock_irq(&x->wait.lock);
+ } while (!x->done && timeout);
+- __remove_wait_queue(&x->wait, &wait);
++ swait_finish_locked(&x->wait, &wait);
+ if (!x->done)
+ return timeout;
+ }
+@@ -89,9 +89,9 @@
+ {
+ might_sleep();
+
+- spin_lock_irq(&x->wait.lock);
++ raw_spin_lock_irq(&x->wait.lock);
+ timeout = do_wait_for_common(x, action, timeout, state);
+- spin_unlock_irq(&x->wait.lock);
++ raw_spin_unlock_irq(&x->wait.lock);
+ return timeout;
+ }
+
+@@ -267,12 +267,12 @@
+ unsigned long flags;
+ int ret = 1;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ if (!x->done)
+ ret = 0;
+ else
+ x->done--;
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(try_wait_for_completion);
+@@ -290,10 +290,10 @@
+ unsigned long flags;
+ int ret = 1;
+
+- spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
+ if (!x->done)
+ ret = 0;
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(completion_done);
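Only the internals of completions change here: the public API is untouched, so every existing complete()/wait_for_completion() pair keeps working, now backed by a simple waitqueue whose lock is a raw spinlock. An illustrative caller, not taken from the patch:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

static DECLARE_COMPLETION(setup_done);

static int worker_fn(void *arg)
{
	/* ... bring the subsystem up ... */
	complete(&setup_done);            /* now ends in __swait_wake_locked() */
	return 0;
}

static int start_and_wait(void)
{
	struct task_struct *t = kthread_run(worker_fn, NULL, "setup");

	if (IS_ERR(t))
		return PTR_ERR(t);
	wait_for_completion(&setup_done); /* sleeps on the simple waitqueue */
	return 0;
}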
+diff -Nur linux-3.18.8.orig/kernel/sched/core.c linux-3.18.8/kernel/sched/core.c
+--- linux-3.18.8.orig/kernel/sched/core.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/core.c 2015-03-03 08:05:18.000000000 +0100
+@@ -280,7 +280,11 @@
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#else
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#endif
+
+ /*
+ * period over which we average the RT time consumption, measured
+@@ -511,6 +515,7 @@
+
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrtick_timer.function = hrtick;
++ rq->hrtick_timer.irqsafe = 1;
+ }
+ #else /* CONFIG_SCHED_HRTICK */
+ static inline void hrtick_clear(struct rq *rq)
+@@ -622,6 +627,38 @@
+ trace_sched_wake_idle_without_ipi(cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++void resched_curr_lazy(struct rq *rq)
++{
++ struct task_struct *curr = rq->curr;
++ int cpu;
++
++ if (!sched_feat(PREEMPT_LAZY)) {
++ resched_curr(rq);
++ return;
++ }
++
++ lockdep_assert_held(&rq->lock);
++
++ if (test_tsk_need_resched(curr))
++ return;
++
++ if (test_tsk_need_resched_lazy(curr))
++ return;
++
++ set_tsk_need_resched_lazy(curr);
++
++ cpu = cpu_of(rq);
++ if (cpu == smp_processor_id())
++ return;
++
++ /* NEED_RESCHED_LAZY must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(curr))
++ smp_send_reschedule(cpu);
++}
++#endif
++
+ void resched_cpu(int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+@@ -645,12 +682,14 @@
+ */
+ int get_nohz_timer_target(int pinned)
+ {
+- int cpu = smp_processor_id();
++ int cpu;
+ int i;
+ struct sched_domain *sd;
+
++ preempt_disable_rt();
++ cpu = smp_processor_id();
+ if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
+- return cpu;
++ goto preempt_en_rt;
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd) {
+@@ -663,6 +702,8 @@
+ }
+ unlock:
+ rcu_read_unlock();
++preempt_en_rt:
++ preempt_enable_rt();
+ return cpu;
+ }
+ /*
+@@ -1193,6 +1234,18 @@
+
+ static int migration_cpu_stop(void *data);
+
++static bool check_task_state(struct task_struct *p, long match_state)
++{
++ bool match = false;
++
++ raw_spin_lock_irq(&p->pi_lock);
++ if (p->state == match_state || p->saved_state == match_state)
++ match = true;
++ raw_spin_unlock_irq(&p->pi_lock);
++
++ return match;
++}
++
+ /*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+@@ -1237,7 +1290,7 @@
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && !check_task_state(p, match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -1252,7 +1305,8 @@
+ running = task_running(rq, p);
+ queued = task_on_rq_queued(p);
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state ||
++ p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &flags);
+
+@@ -1477,10 +1531,6 @@
+ {
+ activate_task(rq, p, en_flags);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+-
+- /* if a worker is waking up, notify workqueue */
+- if (p->flags & PF_WQ_WORKER)
+- wq_worker_waking_up(p, cpu_of(rq));
+ }
+
+ /*
+@@ -1694,8 +1744,27 @@
+ */
+ smp_mb__before_spinlock();
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state) {
++ p->saved_state = TASK_RUNNING;
++ success = 1;
++ }
++ }
+ goto out;
++ }
++
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
+
+ success = 1; /* we're going to change ->state */
+ cpu = task_cpu(p);
+@@ -1738,42 +1807,6 @@
+ }
+
+ /**
+- * try_to_wake_up_local - try to wake up a local task with rq lock held
+- * @p: the thread to be awakened
+- *
+- * Put @p on the run-queue if it's not already there. The caller must
+- * ensure that this_rq() is locked, @p is bound to this_rq() and not
+- * the current task.
+- */
+-static void try_to_wake_up_local(struct task_struct *p)
+-{
+- struct rq *rq = task_rq(p);
+-
+- if (WARN_ON_ONCE(rq != this_rq()) ||
+- WARN_ON_ONCE(p == current))
+- return;
+-
+- lockdep_assert_held(&rq->lock);
+-
+- if (!raw_spin_trylock(&p->pi_lock)) {
+- raw_spin_unlock(&rq->lock);
+- raw_spin_lock(&p->pi_lock);
+- raw_spin_lock(&rq->lock);
+- }
+-
+- if (!(p->state & TASK_NORMAL))
+- goto out;
+-
+- if (!task_on_rq_queued(p))
+- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+-
+- ttwu_do_wakeup(rq, p, 0);
+- ttwu_stat(p, smp_processor_id(), 0);
+-out:
+- raw_spin_unlock(&p->pi_lock);
+-}
+-
+-/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+@@ -1787,11 +1820,23 @@
+ */
+ int wake_up_process(struct task_struct *p)
+ {
+- WARN_ON(task_is_stopped_or_traced(p));
++ WARN_ON(__task_is_stopped_or_traced(p));
+ return try_to_wake_up(p, TASK_NORMAL, 0);
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
+@@ -1982,6 +2027,9 @@
+ p->on_cpu = 0;
+ #endif
+ init_task_preempt_count(p);
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(p)->preempt_lazy_count = 0;
++#endif
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ RB_CLEAR_NODE(&p->pushable_dl_tasks);
+@@ -2265,8 +2313,12 @@
+ finish_arch_post_lock_switch();
+
+ fire_sched_in_preempt_notifiers(current);
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm)
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
+@@ -2691,6 +2743,133 @@
+ schedstat_inc(this_rq(), sched_count);
+ }
+
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
++#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
++#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
++#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
++
++static inline void update_migrate_disable(struct task_struct *p)
++{
++ const struct cpumask *mask;
++
++ if (likely(!p->migrate_disable))
++ return;
++
++ /* Did we already update affinity? */
++ if (unlikely(migrate_disabled_updated(p)))
++ return;
++
++ /*
++ * Since this is always current we can get away with only locking
++ * rq->lock, the ->cpus_allowed value can normally only be changed
++ * while holding both p->pi_lock and rq->lock, but seeing that this
++ * is current, we cannot actually be waking up, so all code that
++ * relies on serialization against p->pi_lock is out of scope.
++ *
++ * Having rq->lock serializes us against things like
++ * set_cpus_allowed_ptr() that can still happen concurrently.
++ */
++ mask = tsk_cpus_allowed(p);
++
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ /* mask==cpumask_of(task_cpu(p)) which has a cpumask_weight==1 */
++ p->nr_cpus_allowed = 1;
++
++ /* Let migrate_enable know to fix things back up */
++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
++}
++
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ if (unlikely(p->migrate_disable_atomic)) {
++ tracing_off();
++ WARN_ON_ONCE(1);
++ }
++#endif
++
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ return;
++ }
++
++ preempt_disable();
++ preempt_lazy_disable();
++ pin_current_cpu();
++ p->migrate_disable = 1;
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ if (unlikely(p->migrate_disable_atomic)) {
++ tracing_off();
++ WARN_ON_ONCE(1);
++ }
++#endif
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ if (migrate_disable_count(p) > 1) {
++ p->migrate_disable--;
++ return;
++ }
++
++ preempt_disable();
++ if (unlikely(migrate_disabled_updated(p))) {
++ /*
++ * Undo whatever update_migrate_disable() did, also see there
++ * about locking.
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
++
++ /*
++ * Clearing migrate_disable causes tsk_cpus_allowed to
++ * show the task's original cpu affinity.
++ */
++ p->migrate_disable = 0;
++ mask = tsk_cpus_allowed(p);
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ } else
++ p->migrate_disable = 0;
++
++ unpin_current_cpu();
++ preempt_enable();
++ preempt_lazy_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++#else
++static inline void update_migrate_disable(struct task_struct *p) { }
++#define migrate_disabled_updated(p) 0
++#endif
++
+ /*
+ * Pick up the highest-prio task:
+ */
+@@ -2794,6 +2973,8 @@
+ smp_mb__before_spinlock();
+ raw_spin_lock_irq(&rq->lock);
+
++ update_migrate_disable(prev);
++
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ if (unlikely(signal_pending_state(prev->state, prev))) {
+@@ -2801,19 +2982,6 @@
+ } else {
+ deactivate_task(rq, prev, DEQUEUE_SLEEP);
+ prev->on_rq = 0;
+-
+- /*
+- * If a worker went to sleep, notify and ask workqueue
+- * whether it wants to wake up a task to maintain
+- * concurrency.
+- */
+- if (prev->flags & PF_WQ_WORKER) {
+- struct task_struct *to_wakeup;
+-
+- to_wakeup = wq_worker_sleeping(prev, cpu);
+- if (to_wakeup)
+- try_to_wake_up_local(to_wakeup);
+- }
+ }
+ switch_count = &prev->nvcsw;
+ }
+@@ -2823,6 +2991,7 @@
+
+ next = pick_next_task(rq, prev);
+ clear_tsk_need_resched(prev);
++ clear_tsk_need_resched_lazy(prev);
+ clear_preempt_need_resched();
+ rq->skip_clock_update = 0;
+
+@@ -2852,9 +3021,20 @@
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+ /*
++ * If a worker went to sleep, notify and ask workqueue whether
++ * it wants to wake up a task to maintain concurrency.
++ */
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_sleeping(tsk);
++
++
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
++ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+@@ -2862,12 +3042,19 @@
+ blk_schedule_flush_plug(tsk);
+ }
+
++static inline void sched_update_worker(struct task_struct *tsk)
++{
++ if (tsk->flags & PF_WQ_WORKER)
++ wq_worker_running(tsk);
++}
++
+ asmlinkage __visible void __sched schedule(void)
+ {
+ struct task_struct *tsk = current;
+
+ sched_submit_work(tsk);
+ __schedule();
++ sched_update_worker(tsk);
+ }
+ EXPORT_SYMBOL(schedule);
+
+@@ -2917,9 +3104,26 @@
+ if (likely(!preemptible()))
+ return;
+
++#ifdef CONFIG_PREEMPT_LAZY
++ /*
++ * Check for lazy preemption
++ */
++ if (current_thread_info()->preempt_lazy_count &&
++ !test_thread_flag(TIF_NEED_RESCHED))
++ return;
++#endif
+ do {
+ __preempt_count_add(PREEMPT_ACTIVE);
++ /*
++ * The add/subtract must not be traced by the function
++ * tracer. But we still want to account for the
++ * preempt off latency tracer. Since the _notrace versions
++ * of add/subtract skip the accounting for latency tracer
++ * we must force it manually.
++ */
++ start_critical_timings();
+ __schedule();
++ stop_critical_timings();
+ __preempt_count_sub(PREEMPT_ACTIVE);
+
+ /*
+@@ -4229,9 +4433,16 @@
+
+ static void __cond_resched(void)
+ {
+- __preempt_count_add(PREEMPT_ACTIVE);
+- __schedule();
+- __preempt_count_sub(PREEMPT_ACTIVE);
++ do {
++ __preempt_count_add(PREEMPT_ACTIVE);
++ __schedule();
++ __preempt_count_sub(PREEMPT_ACTIVE);
++ /*
++ * Check again in case we missed a preemption
++ * opportunity between schedule and now.
++ */
++ barrier();
++ } while (need_resched());
+ }
+
+ int __sched _cond_resched(void)
+@@ -4272,6 +4483,7 @@
+ }
+ EXPORT_SYMBOL(__cond_resched_lock);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+@@ -4285,6 +4497,7 @@
+ return 0;
+ }
+ EXPORT_SYMBOL(__cond_resched_softirq);
++#endif
+
+ /**
+ * yield - yield the current processor to other threads.
+@@ -4646,7 +4859,9 @@
+
+ /* Set the preempt count _outside_ the spinlocks! */
+ init_idle_preempt_count(idle, cpu);
+-
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(idle)->preempt_lazy_count = 0;
++#endif
+ /*
+ * The idle tasks have their own, simple scheduling class:
+ */
+@@ -4688,11 +4903,91 @@
+
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (p->sched_class && p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, new_mask);
++ if (!migrate_disabled_updated(p)) {
++ if (p->sched_class && p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++ }
+
+ cpumask_copy(&p->cpus_allowed, new_mask);
+- p->nr_cpus_allowed = cpumask_weight(new_mask);
++}
++
++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
++static DEFINE_MUTEX(sched_down_mutex);
++static cpumask_t sched_down_cpumask;
++
++void tell_sched_cpu_down_begin(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_set_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++void tell_sched_cpu_down_done(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_clear_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++/**
++ * migrate_me - try to move the current task off this cpu
++ *
++ * Used by the pin_current_cpu() code to try to get tasks
++ * to move off the current CPU as it is going down.
++ * It will only move the task if the task isn't pinned to
++ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY)
++ * and the task has to be in a RUNNING state. Otherwise the
++ * movement of the task will wake it up (change its state
++ * to running) when the task did not expect it.
++ *
++ * Returns 1 if it succeeded in moving the current task
++ * 0 otherwise.
++ */
++int migrate_me(void)
++{
++ struct task_struct *p = current;
++ struct migration_arg arg;
++ struct cpumask *cpumask;
++ struct cpumask *mask;
++ unsigned long flags;
++ unsigned int dest_cpu;
++ struct rq *rq;
++
++ /*
++ * We cannot migrate tasks bound to a CPU or tasks not
++ * running. The movement of the task will wake it up.
++ */
++ if (p->flags & PF_NO_SETAFFINITY || p->state)
++ return 0;
++
++ mutex_lock(&sched_down_mutex);
++ rq = task_rq_lock(p, &flags);
++
++ cpumask = &__get_cpu_var(sched_cpumasks);
++ mask = &p->cpus_allowed;
++
++ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
++
++ if (!cpumask_weight(cpumask)) {
++ /* It's only on this CPU? */
++ task_rq_unlock(rq, p, &flags);
++ mutex_unlock(&sched_down_mutex);
++ return 0;
++ }
++
++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
++
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ task_rq_unlock(rq, p, &flags);
++
++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ mutex_unlock(&sched_down_mutex);
++
++ return 1;
+ }
+
+ /*
+@@ -4738,7 +5033,7 @@
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -4878,6 +5173,8 @@
+
+ #ifdef CONFIG_HOTPLUG_CPU
+
++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
++
+ /*
+ * Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+@@ -4892,7 +5189,11 @@
+ switch_mm(mm, &init_mm, current);
+ finish_arch_post_lock_switch();
+ }
+- mmdrop(mm);
++ /*
++ * Defer the cleanup to a live CPU. On RT we can neither
++ * call mmdrop() nor mmdrop_delayed() from here.
++ */
++ per_cpu(idle_last_mm, smp_processor_id()) = mm;
+ }
+
+ /*
+@@ -5235,6 +5536,10 @@
+
+ case CPU_DEAD:
+ calc_load_migrate(rq);
++ if (per_cpu(idle_last_mm, cpu)) {
++ mmdrop(per_cpu(idle_last_mm, cpu));
++ per_cpu(idle_last_mm, cpu) = NULL;
++ }
+ break;
+ #endif
+ }
+@@ -7176,7 +7481,8 @@
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
+ {
+- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
++ sched_rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+ }
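migrate_disable()/migrate_enable() give RT code a middle ground between full preemption and preempt_disable(): the task is pinned to its current CPU but may still be preempted and may even sleep, which is what makes sleeping spinlocks legal inside the section. A usage sketch; that the declarations live in linux/preempt.h is an assumption here:

#include <linux/preempt.h>  /* assumed home of migrate_disable() on RT */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_local_counter(void)
{
	migrate_disable();      /* pinned to this CPU, still preemptible */
	this_cpu_inc(my_counter);
	/* a sleeping spinlock could be taken here without losing the pinning */
	migrate_enable();
}

Unlike get_cpu()/put_cpu(), this keeps the section preemptible, at the price of the extra bookkeeping visible in migrate_enable() above.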
+diff -Nur linux-3.18.8.orig/kernel/sched/cputime.c linux-3.18.8/kernel/sched/cputime.c
+--- linux-3.18.8.orig/kernel/sched/cputime.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/cputime.c 2015-03-03 08:05:18.000000000 +0100
+@@ -675,37 +675,45 @@
+
+ void vtime_account_system(struct task_struct *tsk)
+ {
+- write_seqlock(&tsk->vtime_seqlock);
++ raw_spin_lock(&tsk->vtime_lock);
++ write_seqcount_begin(&tsk->vtime_seq);
+ __vtime_account_system(tsk);
+- write_sequnlock(&tsk->vtime_seqlock);
++ write_seqcount_end(&tsk->vtime_seq);
++ raw_spin_unlock(&tsk->vtime_lock);
+ }
+
+ void vtime_gen_account_irq_exit(struct task_struct *tsk)
+ {
+- write_seqlock(&tsk->vtime_seqlock);
++ raw_spin_lock(&tsk->vtime_lock);
++ write_seqcount_begin(&tsk->vtime_seq);
+ __vtime_account_system(tsk);
+ if (context_tracking_in_user())
+ tsk->vtime_snap_whence = VTIME_USER;
+- write_sequnlock(&tsk->vtime_seqlock);
++ write_seqcount_end(&tsk->vtime_seq);
++ raw_spin_unlock(&tsk->vtime_lock);
+ }
+
+ void vtime_account_user(struct task_struct *tsk)
+ {
+ cputime_t delta_cpu;
+
+- write_seqlock(&tsk->vtime_seqlock);
++ raw_spin_lock(&tsk->vtime_lock);
++ write_seqcount_begin(&tsk->vtime_seq);
+ delta_cpu = get_vtime_delta(tsk);
+ tsk->vtime_snap_whence = VTIME_SYS;
+ account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+- write_sequnlock(&tsk->vtime_seqlock);
++ write_seqcount_end(&tsk->vtime_seq);
++ raw_spin_unlock(&tsk->vtime_lock);
+ }
+
+ void vtime_user_enter(struct task_struct *tsk)
+ {
+- write_seqlock(&tsk->vtime_seqlock);
++ raw_spin_lock(&tsk->vtime_lock);
++ write_seqcount_begin(&tsk->vtime_seq);
+ __vtime_account_system(tsk);
+ tsk->vtime_snap_whence = VTIME_USER;
+- write_sequnlock(&tsk->vtime_seqlock);
++ write_seqcount_end(&tsk->vtime_seq);
++ raw_spin_unlock(&tsk->vtime_lock);
+ }
+
+ void vtime_guest_enter(struct task_struct *tsk)
+@@ -717,19 +725,23 @@
+ * synchronization against the reader (task_gtime())
+ * that can thus safely catch up with a tickless delta.
+ */
+- write_seqlock(&tsk->vtime_seqlock);
++ raw_spin_lock(&tsk->vtime_lock);
++ write_seqcount_begin(&tsk->vtime_seq);
+ __vtime_account_system(tsk);
+ current->flags |= PF_VCPU;
+- write_sequnlock(&tsk->vtime_seqlock);
++ write_seqcount_end(&tsk->vtime_seq);
++ raw_spin_unlock(&tsk->vtime_lock);
+ }
+ EXPORT_SYMBOL_GPL(vtime_guest_enter);
+
+ void vtime_guest_exit(struct task_struct *tsk)
+ {
+- write_seqlock(&tsk->vtime_seqlock);
++ raw_spin_lock(&tsk->vtime_lock);
++ write_seqcount_begin(&tsk->vtime_seq);
+ __vtime_account_system(tsk);
+ current->flags &= ~PF_VCPU;
+- write_sequnlock(&tsk->vtime_seqlock);
++ write_seqcount_end(&tsk->vtime_seq);
++ raw_spin_unlock(&tsk->vtime_lock);
+ }
+ EXPORT_SYMBOL_GPL(vtime_guest_exit);
+
+@@ -742,24 +754,30 @@
+
+ void arch_vtime_task_switch(struct task_struct *prev)
+ {
+- write_seqlock(&prev->vtime_seqlock);
++ raw_spin_lock(&prev->vtime_lock);
++ write_seqcount_begin(&prev->vtime_seq);
+ prev->vtime_snap_whence = VTIME_SLEEPING;
+- write_sequnlock(&prev->vtime_seqlock);
++ write_seqcount_end(&prev->vtime_seq);
++ raw_spin_unlock(&prev->vtime_lock);
+
+- write_seqlock(&current->vtime_seqlock);
++ raw_spin_lock(&current->vtime_lock);
++ write_seqcount_begin(&current->vtime_seq);
+ current->vtime_snap_whence = VTIME_SYS;
+ current->vtime_snap = sched_clock_cpu(smp_processor_id());
+- write_sequnlock(&current->vtime_seqlock);
++ write_seqcount_end(&current->vtime_seq);
++ raw_spin_unlock(&current->vtime_lock);
+ }
+
+ void vtime_init_idle(struct task_struct *t, int cpu)
+ {
+ unsigned long flags;
+
+- write_seqlock_irqsave(&t->vtime_seqlock, flags);
++ raw_spin_lock_irqsave(&t->vtime_lock, flags);
++ write_seqcount_begin(&t->vtime_seq);
+ t->vtime_snap_whence = VTIME_SYS;
+ t->vtime_snap = sched_clock_cpu(cpu);
+- write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
++ write_seqcount_end(&t->vtime_seq);
++ raw_spin_unlock_irqrestore(&t->vtime_lock, flags);
+ }
+
+ cputime_t task_gtime(struct task_struct *t)
+@@ -768,13 +786,13 @@
+ cputime_t gtime;
+
+ do {
+- seq = read_seqbegin(&t->vtime_seqlock);
++ seq = read_seqcount_begin(&t->vtime_seq);
+
+ gtime = t->gtime;
+ if (t->flags & PF_VCPU)
+ gtime += vtime_delta(t);
+
+- } while (read_seqretry(&t->vtime_seqlock, seq));
++ } while (read_seqcount_retry(&t->vtime_seq, seq));
+
+ return gtime;
+ }
+@@ -797,7 +815,7 @@
+ *udelta = 0;
+ *sdelta = 0;
+
+- seq = read_seqbegin(&t->vtime_seqlock);
++ seq = read_seqcount_begin(&t->vtime_seq);
+
+ if (u_dst)
+ *u_dst = *u_src;
+@@ -821,7 +839,7 @@
+ if (t->vtime_snap_whence == VTIME_SYS)
+ *sdelta = delta;
+ }
+- } while (read_seqretry(&t->vtime_seqlock, seq));
++ } while (read_seqcount_retry(&t->vtime_seq, seq));
+ }
+
+
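The conversion above splits the old vtime_seqlock into a raw_spinlock_t for writer serialization (still a spinning lock on RT, so it is safe in contexts where a seqlock_t's embedded spinlock_t would have turned into a sleeping lock) plus a bare seqcount_t for lockless reader retry. The generic shape of the pattern, with hypothetical names:

#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct vtimes {
	raw_spinlock_t lock; /* serializes writers; never sleeps on RT */
	seqcount_t     seq;  /* lets lockless readers detect updates */
	u64            snap;
};

static void vtimes_update(struct vtimes *v, u64 now)
{
	raw_spin_lock(&v->lock);
	write_seqcount_begin(&v->seq);
	v->snap = now;
	write_seqcount_end(&v->seq);
	raw_spin_unlock(&v->lock);
}

static u64 vtimes_read(struct vtimes *v)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&v->seq);
		val = v->snap;
	} while (read_seqcount_retry(&v->seq, seq));
	return val;
}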
+diff -Nur linux-3.18.8.orig/kernel/sched/deadline.c linux-3.18.8/kernel/sched/deadline.c
+--- linux-3.18.8.orig/kernel/sched/deadline.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/deadline.c 2015-03-03 08:05:18.000000000 +0100
+@@ -570,6 +570,7 @@
+
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer->function = dl_task_timer;
++ timer->irqsafe = 1;
+ }
+
+ static
+diff -Nur linux-3.18.8.orig/kernel/sched/debug.c linux-3.18.8/kernel/sched/debug.c
+--- linux-3.18.8.orig/kernel/sched/debug.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/debug.c 2015-03-03 08:05:18.000000000 +0100
+@@ -256,6 +256,9 @@
+ P(rt_throttled);
+ PN(rt_time);
+ PN(rt_runtime);
++#ifdef CONFIG_SMP
++ P(rt_nr_migratory);
++#endif
+
+ #undef PN
+ #undef P
+@@ -634,6 +637,10 @@
+ #endif
+ P(policy);
+ P(prio);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ P(migrate_disable);
++#endif
++ P(nr_cpus_allowed);
+ #undef PN
+ #undef __PN
+ #undef P
+diff -Nur linux-3.18.8.orig/kernel/sched/fair.c linux-3.18.8/kernel/sched/fair.c
+--- linux-3.18.8.orig/kernel/sched/fair.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/fair.c 2015-03-03 08:05:18.000000000 +0100
+@@ -2951,7 +2951,7 @@
+ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ /*
+ * The current task ran long enough, ensure it doesn't get
+ * re-elected due to buddy favours.
+@@ -2975,7 +2975,7 @@
+ return;
+
+ if (delta > ideal_runtime)
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static void
+@@ -3115,7 +3115,7 @@
+ * validating it and just reschedule.
+ */
+ if (queued) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ return;
+ }
+ /*
+@@ -3306,7 +3306,7 @@
+ * hierarchy can be throttled
+ */
+ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static __always_inline
+@@ -3925,7 +3925,7 @@
+
+ if (delta < 0) {
+ if (rq->curr == p)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ return;
+ }
+ hrtick_start(rq, delta);
+@@ -4792,7 +4792,7 @@
+ return;
+
+ preempt:
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+@@ -7576,7 +7576,7 @@
+ * 'current' within the tree based on its new key value.
+ */
+ swap(curr->vruntime, se->vruntime);
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ }
+
+ se->vruntime -= cfs_rq->min_vruntime;
+@@ -7601,7 +7601,7 @@
+ */
+ if (rq->curr == p) {
+ if (p->prio > oldprio)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ } else
+ check_preempt_curr(rq, p, 0);
+ }
+diff -Nur linux-3.18.8.orig/kernel/sched/features.h linux-3.18.8/kernel/sched/features.h
+--- linux-3.18.8.orig/kernel/sched/features.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/features.h 2015-03-03 08:05:18.000000000 +0100
+@@ -50,12 +50,18 @@
+ */
+ SCHED_FEAT(NONTASK_CAPACITY, true)
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++SCHED_FEAT(TTWU_QUEUE, false)
++# ifdef CONFIG_PREEMPT_LAZY
++SCHED_FEAT(PREEMPT_LAZY, true)
++# endif
++#else
+ /*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+ SCHED_FEAT(TTWU_QUEUE, true)
+-
++#endif
+ SCHED_FEAT(FORCE_SD_OVERLAP, false)
+ SCHED_FEAT(RT_RUNTIME_SHARE, true)
+ SCHED_FEAT(LB_MIN, false)
+diff -Nur linux-3.18.8.orig/kernel/sched/Makefile linux-3.18.8/kernel/sched/Makefile
+--- linux-3.18.8.orig/kernel/sched/Makefile 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/Makefile 2015-03-03 08:05:18.000000000 +0100
+@@ -13,7 +13,7 @@
+
+ obj-y += core.o proc.o clock.o cputime.o
+ obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
+-obj-y += wait.o completion.o idle.o
++obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+ obj-$(CONFIG_SCHEDSTATS) += stats.o
+diff -Nur linux-3.18.8.orig/kernel/sched/rt.c linux-3.18.8/kernel/sched/rt.c
+--- linux-3.18.8.orig/kernel/sched/rt.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/rt.c 2015-03-03 08:05:18.000000000 +0100
+@@ -43,6 +43,7 @@
+
+ hrtimer_init(&rt_b->rt_period_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rt_b->rt_period_timer.irqsafe = 1;
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
+ }
+
+diff -Nur linux-3.18.8.orig/kernel/sched/sched.h linux-3.18.8/kernel/sched/sched.h
+--- linux-3.18.8.orig/kernel/sched/sched.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/sched/sched.h 2015-03-03 08:05:18.000000000 +0100
+@@ -1018,6 +1018,7 @@
+ #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
+ #define WF_FORK 0x02 /* child wakeup after fork */
+ #define WF_MIGRATED 0x4 /* internal use, task got migrated */
++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+
+ /*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+@@ -1210,6 +1211,15 @@
+ extern void resched_curr(struct rq *rq);
+ extern void resched_cpu(int cpu);
+
++#ifdef CONFIG_PREEMPT_LAZY
++extern void resched_curr_lazy(struct rq *rq);
++#else
++static inline void resched_curr_lazy(struct rq *rq)
++{
++ resched_curr(rq);
++}
++#endif
++
+ extern struct rt_bandwidth def_rt_bandwidth;
+ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+
+diff -Nur linux-3.18.8.orig/kernel/sched/wait-simple.c linux-3.18.8/kernel/sched/wait-simple.c
+--- linux-3.18.8.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/kernel/sched/wait-simple.c 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,115 @@
++/*
++ * Simple waitqueues without fancy flags and callbacks
++ *
++ * (C) 2011 Thomas Gleixner <tglx@linutronix.de>
++ *
++ * Based on kernel/wait.c
++ *
++ * For licencing details see kernel-base/COPYING
++ */
++#include <linux/init.h>
++#include <linux/export.h>
++#include <linux/sched.h>
++#include <linux/wait-simple.h>
++
++/* Adds w to head->list. Must be called with head->lock locked. */
++static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
++{
++ list_add(&w->node, &head->list);
++ /* We can't let the condition leak before the setting of head */
++ smp_mb();
++}
++
++/* Removes w from head->list. Must be called with head->lock locked. */
++static inline void __swait_dequeue(struct swaiter *w)
++{
++ list_del_init(&w->node);
++}
++
++void __init_swait_head(struct swait_head *head, struct lock_class_key *key)
++{
++ raw_spin_lock_init(&head->lock);
++ lockdep_set_class(&head->lock, key);
++ INIT_LIST_HEAD(&head->list);
++}
++EXPORT_SYMBOL(__init_swait_head);
++
++void swait_prepare_locked(struct swait_head *head, struct swaiter *w)
++{
++ w->task = current;
++ if (list_empty(&w->node))
++ __swait_enqueue(head, w);
++}
++
++void swait_prepare(struct swait_head *head, struct swaiter *w, int state)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&head->lock, flags);
++ swait_prepare_locked(head, w);
++ __set_current_state(state);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
++}
++EXPORT_SYMBOL(swait_prepare);
++
++void swait_finish_locked(struct swait_head *head, struct swaiter *w)
++{
++ __set_current_state(TASK_RUNNING);
++ if (w->task)
++ __swait_dequeue(w);
++}
++
++void swait_finish(struct swait_head *head, struct swaiter *w)
++{
++ unsigned long flags;
++
++ __set_current_state(TASK_RUNNING);
++ if (w->task) {
++ raw_spin_lock_irqsave(&head->lock, flags);
++ __swait_dequeue(w);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
++ }
++}
++EXPORT_SYMBOL(swait_finish);
++
++unsigned int
++__swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num)
++{
++ struct swaiter *curr, *next;
++ int woken = 0;
++
++ list_for_each_entry_safe(curr, next, &head->list, node) {
++ if (wake_up_state(curr->task, state)) {
++ __swait_dequeue(curr);
++ /*
++ * The waiting task can free the waiter as
++ * soon as curr->task = NULL is written,
++ * without taking any locks. A memory barrier
++ * is required here to prevent the following
++ * store to curr->task from getting ahead of
++ * the dequeue operation.
++ */
++ smp_wmb();
++ curr->task = NULL;
++ if (++woken == num)
++ break;
++ }
++ }
++ return woken;
++}
++
++unsigned int
++__swait_wake(struct swait_head *head, unsigned int state, unsigned int num)
++{
++ unsigned long flags;
++ int woken;
++
++ if (!swaitqueue_active(head))
++ return 0;
++
++ raw_spin_lock_irqsave(&head->lock, flags);
++ woken = __swait_wake_locked(head, state, num);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
++ return woken;
++}
++EXPORT_SYMBOL(__swait_wake);
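A waiter built from these primitives brackets its sleep loop with swait_prepare()/swait_finish(); the waker side is a single swait_wake(). A usage sketch assembled from the API introduced above (init_swait_head() and DEFINE_SWAITER() come from the linux/wait-simple.h header, which this patch hunk does not show):

#include <linux/wait-simple.h>
#include <linux/sched.h>

static struct swait_head my_wq;
static int condition;

static void my_init(void)
{
	init_swait_head(&my_wq);
}

/* Waiter: open-coded wait loop; swait_prepare() enqueues us and sets
 * the task state, swait_finish() dequeues and restores TASK_RUNNING. */
static void wait_for_condition(void)
{
	DEFINE_SWAITER(w);

	for (;;) {
		swait_prepare(&my_wq, &w, TASK_UNINTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	swait_finish(&my_wq, &w);
}

/* Waker: publish the condition, then wake one waiter. */
static void signal_condition(void)
{
	condition = 1;
	swait_wake(&my_wq);
}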
+diff -Nur linux-3.18.8.orig/kernel/sched/work-simple.c linux-3.18.8/kernel/sched/work-simple.c
+--- linux-3.18.8.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/kernel/sched/work-simple.c 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,176 @@
++/*
++ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
++ *
++ * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks
++ * from irq context. The callbacks are executed in kthread context.
++ */
++
++#include <linux/wait-simple.h>
++#include <linux/work-simple.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++
++#define SWORK_EVENT_PENDING (1 << 0)
++
++static DEFINE_MUTEX(worker_mutex);
++static struct sworker *glob_worker;
++
++struct sworker {
++ struct list_head events;
++ struct swait_head wq;
++
++ raw_spinlock_t lock;
++
++ struct task_struct *task;
++ int refs;
++};
++
++static bool swork_readable(struct sworker *worker)
++{
++ bool r;
++
++ if (kthread_should_stop())
++ return true;
++
++ raw_spin_lock(&worker->lock);
++ r = !list_empty(&worker->events);
++ raw_spin_unlock(&worker->lock);
++
++ return r;
++}
++
++static int swork_kthread(void *arg)
++{
++ struct sworker *worker = arg;
++
++ pr_info("swork_kthread enter\n");
++
++ for (;;) {
++ swait_event_interruptible(worker->wq,
++ swork_readable(worker));
++ if (kthread_should_stop())
++ break;
++
++ raw_spin_lock(&worker->lock);
++ while (!list_empty(&worker->events)) {
++ struct swork_event *sev;
++
++ sev = list_first_entry(&worker->events,
++ struct swork_event, item);
++ list_del(&sev->item);
++ raw_spin_unlock(&worker->lock);
++
++ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
++ &sev->flags));
++ sev->func(sev);
++ raw_spin_lock(&worker->lock);
++ }
++ raw_spin_unlock(&worker->lock);
++ }
++
++ pr_info("swork_kthread exit\n");
++ return 0;
++}
++
++static struct sworker *swork_create(void)
++{
++ struct sworker *worker;
++
++ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
++ if (!worker)
++ return ERR_PTR(-ENOMEM);
++
++ INIT_LIST_HEAD(&worker->events);
++ raw_spin_lock_init(&worker->lock);
++ init_swait_head(&worker->wq);
++
++ worker->task = kthread_run(swork_kthread, worker, "kswork");
++ if (IS_ERR(worker->task)) {
++ kfree(worker);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return worker;
++}
++
++static void swork_destroy(struct sworker *worker)
++{
++ kthread_stop(worker->task);
++
++ WARN_ON(!list_empty(&worker->events));
++ kfree(worker);
++}
++
++/**
++ * swork_queue - queue swork
++ *
++ * Returns %false if @sev was already on a queue, %true otherwise.
++ *
++ * The work is queued and processed on a random CPU
++ */
++bool swork_queue(struct swork_event *sev)
++{
++ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
++ return false;
++
++ WARN_ON(irqs_disabled());
++
++ raw_spin_lock(&glob_worker->lock);
++ list_add_tail(&sev->item, &glob_worker->events);
++ raw_spin_unlock(&glob_worker->lock);
++
++ swait_wake(&glob_worker->wq);
++ return true;
++}
++EXPORT_SYMBOL_GPL(swork_queue);
++
++/**
++ * swork_get - get an instance of the sworker
++ *
++ * Returns a negative error code if the initialization of the worker
++ * failed, %0 otherwise.
++ *
++ */
++int swork_get(void)
++{
++ struct sworker *worker;
++
++ mutex_lock(&worker_mutex);
++ if (!glob_worker) {
++ worker = swork_create();
++ if (IS_ERR(worker)) {
++ mutex_unlock(&worker_mutex);
++ return -ENOMEM;
++ }
++
++ glob_worker = worker;
++ }
++
++ glob_worker->refs++;
++ mutex_unlock(&worker_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(swork_get);
++
++/**
++ * swork_put - puts an instance of the sworker
++ *
++ * Will destroy the sworker thread. This function must not be called until all
++ * queued events have been completed.
++ */
++void swork_put(void)
++{
++ mutex_lock(&worker_mutex);
++
++ glob_worker->refs--;
++ if (glob_worker->refs > 0)
++ goto out;
++
++ swork_destroy(glob_worker);
++ glob_worker = NULL;
++out:
++ mutex_unlock(&worker_mutex);
++}
++EXPORT_SYMBOL_GPL(swork_put);
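Usage is reference-counted: swork_get() on init spawns the kswork thread for the first user, swork_queue() defers a callback into that kthread, and swork_put() tears the thread down with the last user. A sketch assuming the companion linux/work-simple.h header provides an INIT_SWORK()-style initializer; that header is not part of this hunk:

#include <linux/work-simple.h>

static struct swork_event my_ev;

static void my_ev_fn(struct swork_event *sev)
{
	/* runs in the kswork kthread, fully preemptible on RT */
}

static int my_driver_init(void)
{
	int ret = swork_get();          /* spawns kswork on first use */

	if (ret)
		return ret;
	INIT_SWORK(&my_ev, my_ev_fn);   /* assumed helper from work-simple.h */
	return 0;
}

static void my_irq_tail(void)
{
	swork_queue(&my_ev);            /* %false return = already pending */
}

static void my_driver_exit(void)
{
	swork_put();                    /* stops kswork with the last user */
}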
+diff -Nur linux-3.18.8.orig/kernel/signal.c linux-3.18.8/kernel/signal.c
+--- linux-3.18.8.orig/kernel/signal.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/signal.c 2015-03-03 08:05:18.000000000 +0100
+@@ -14,6 +14,7 @@
+ #include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/sched.h>
++#include <linux/sched/rt.h>
+ #include <linux/fs.h>
+ #include <linux/tty.h>
+ #include <linux/binfmts.h>
+@@ -352,13 +353,45 @@
+ return false;
+ }
+
++#ifdef __HAVE_ARCH_CMPXCHG
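++/*
++ * Lockless one-entry sigqueue cache per task: cmpxchg() either claims
++ * (get) or refills (put) t->sigqueue_cache, so the cache can be used
++ * from any context without taking a lock.
++ */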
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
++
++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
++ return NULL;
++ return q;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ return NULL;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ return 1;
++}
++
++#endif
++
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, int fromslab)
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -375,7 +408,10 @@
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = get_task_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+@@ -392,6 +428,13 @@
+ return q;
+ }
+
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -401,6 +444,21 @@
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+
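++/*
++ * Free a sigqueue entry, but let an RT task stash it in its one-entry
++ * cache first so the next signal can reuse it without going through
++ * the slab allocator.
++ */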
++static void sigqueue_free_current(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
++ atomic_dec(&up->sigpending);
++ free_uid(up);
++ } else
++ __sigqueue_free(q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
+@@ -414,6 +472,21 @@
+ }
+
+ /*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
++
++ flush_sigqueue(&tsk->pending);
++
++ q = get_task_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
++/*
+ * Flush all pending signals for a task.
+ */
+ void __flush_signals(struct task_struct *t)
+@@ -565,7 +638,7 @@
+ still_pending:
+ list_del_init(&first->list);
+ copy_siginfo(info, &first->info);
+- __sigqueue_free(first);
++ sigqueue_free_current(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -611,6 +684,8 @@
+ {
+ int signr;
+
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -1207,8 +1282,8 @@
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
+ */
+-int
+-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
++static int
++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ unsigned long int flags;
+ int ret, blocked, ignored;
+@@ -1233,6 +1308,39 @@
+ return ret;
+ }
+
++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
++{
++/*
++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
++ * since it cannot enable preemption, and the signal code's spin_locks
++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
++ * send the signal on exit of the trap.
++ */
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (in_atomic()) {
++ if (WARN_ON_ONCE(t != current))
++ return 0;
++ if (WARN_ON_ONCE(t->forced_info.si_signo))
++ return 0;
++
++ if (is_si_special(info)) {
++ WARN_ON_ONCE(info != SEND_SIG_PRIV);
++ t->forced_info.si_signo = sig;
++ t->forced_info.si_errno = 0;
++ t->forced_info.si_code = SI_KERNEL;
++ t->forced_info.si_pid = 0;
++ t->forced_info.si_uid = 0;
++ } else {
++ t->forced_info = *info;
++ }
++
++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++ return 0;
++ }
++#endif
++ return do_force_sig_info(sig, info, t);
++}
++
+ /*
+ * Nuke all other threads in the group.
+ */
+@@ -1267,12 +1375,12 @@
+ * Disable interrupts early to avoid deadlocks.
+ * See rcu_read_unlock() comment header for details.
+ */
+- local_irq_save(*flags);
++ local_irq_save_nort(*flags);
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ break;
+ }
+
+@@ -1283,7 +1391,7 @@
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ }
+
+ return sighand;
+@@ -1528,7 +1636,8 @@
+ */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ /* Preallocated sigqueue objects always come from the slab cache! */
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
+@@ -1889,15 +1998,7 @@
+ if (gstop_done && ptrace_reparented(current))
+ do_notify_parent_cldstop(current, false, why);
+
+- /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
+- *
+- * XXX: implement read_unlock_no_resched().
+- */
+- preempt_disable();
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
+ freezable_schedule();
+ } else {
+ /*
+diff -Nur linux-3.18.8.orig/kernel/softirq.c linux-3.18.8/kernel/softirq.c
+--- linux-3.18.8.orig/kernel/softirq.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/softirq.c 2015-03-03 08:05:18.000000000 +0100
+@@ -21,10 +21,12 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+ #include <linux/rcupdate.h>
++#include <linux/delay.h>
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+ #include <linux/irq.h>
+
+ #define CREATE_TRACE_POINTS
+@@ -62,6 +64,98 @@
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ_COMMON
++# ifdef CONFIG_PREEMPT_RT_FULL
++
++struct softirq_runner {
++ struct task_struct *runner[NR_SOFTIRQS];
++};
++
++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
++
++static inline void softirq_set_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
++
++ sr->runner[sirq] = current;
++}
++
++static inline void softirq_clr_runner(unsigned int sirq)
++{
++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
++
++ sr->runner[sirq] = NULL;
++}
++
++/*
++ * On preempt-rt a softirq running context might be blocked on a
++ * lock. There might be no other runnable task on this CPU because the
++ * lock owner runs on some other CPU. So we have to go into idle with
++ * the pending bit set. Therefore we need to check this; otherwise we
++ * would warn about false positives, which confuses users and defeats the
++ * whole purpose of this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ struct softirq_runner *sr = &__get_cpu_var(softirq_runners);
++ u32 warnpending;
++ int i;
++
++ if (rate_limit >= 10)
++ return;
++
++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
++ for (i = 0; i < NR_SOFTIRQS; i++) {
++ struct task_struct *tsk = sr->runner[i];
++
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ if (tsk) {
++ raw_spin_lock(&tsk->pi_lock);
++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
++ /* Clear all bits pending in that task */
++ warnpending &= ~(tsk->softirqs_raised);
++ warnpending &= ~(1 << i);
++ }
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ warnpending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10 &&
++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
++
++#else /* !CONFIG_NO_HZ_COMMON */
++static inline void softirq_set_runner(unsigned int sirq) { }
++static inline void softirq_clr_runner(unsigned int sirq) { }
++#endif
++
+ /*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+@@ -77,6 +171,70 @@
+ wake_up_process(tsk);
+ }
+
++static void handle_softirq(unsigned int vec_nr)
++{
++ struct softirq_action *h = softirq_vec + vec_nr;
++ int prev_count;
++
++ prev_count = preempt_count();
++
++ kstat_incr_softirqs_this_cpu(vec_nr);
++
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++ if (unlikely(prev_count != preempt_count())) {
++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, preempt_count());
++ preempt_count_set(prev_count);
++ }
++}
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return local_softirq_pending();
++}
++
++static void handle_pending_softirqs(u32 pending, int need_rcu_bh_qs)
++{
++ struct softirq_action *h = softirq_vec;
++ int softirq_bit;
++
++ local_irq_enable();
++
++ h = softirq_vec;
++
++ while ((softirq_bit = ffs(pending))) {
++ unsigned int vec_nr;
++
++ h += softirq_bit - 1;
++ vec_nr = h - softirq_vec;
++ handle_softirq(vec_nr);
++
++ h++;
++ pending >>= softirq_bit;
++ }
++
++ if (need_rcu_bh_qs)
++ rcu_bh_qs();
++ local_irq_disable();
++}
++
++static void run_ksoftirqd(unsigned int cpu)
++{
++ local_irq_disable();
++ if (ksoftirqd_softirq_pending()) {
++ __do_softirq();
++ rcu_note_context_switch(cpu);
++ local_irq_enable();
++ cond_resched();
++ return;
++ }
++ local_irq_enable();
++}
++
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -228,10 +386,8 @@
+ unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
+ unsigned long old_flags = current->flags;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+- struct softirq_action *h;
+ bool in_hardirq;
+ __u32 pending;
+- int softirq_bit;
+
+ /*
+ * Mask out PF_MEMALLOC as current task context is borrowed for the
+@@ -250,36 +406,7 @@
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- local_irq_enable();
+-
+- h = softirq_vec;
+-
+- while ((softirq_bit = ffs(pending))) {
+- unsigned int vec_nr;
+- int prev_count;
+-
+- h += softirq_bit - 1;
+-
+- vec_nr = h - softirq_vec;
+- prev_count = preempt_count();
+-
+- kstat_incr_softirqs_this_cpu(vec_nr);
+-
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+- vec_nr, softirq_to_name[vec_nr], h->action,
+- prev_count, preempt_count());
+- preempt_count_set(prev_count);
+- }
+- h++;
+- pending >>= softirq_bit;
+- }
+-
+- rcu_bh_qs();
+- local_irq_disable();
++ handle_pending_softirqs(pending, 1);
+
+ pending = local_softirq_pending();
+ if (pending) {
+@@ -316,6 +443,285 @@
+ }
+
+ /*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in an interrupt or softirq, we're done
++ * (this also catches softirq-disabled code). We will
++ * actually run the softirq once we return from
++ * the irq or softirq.
++ *
++ * Otherwise we wake up ksoftirqd to make sure we
++ * schedule the softirq soon.
++ */
++ if (!in_interrupt())
++ wakeup_softirqd();
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++}
++
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static void ksoftirqd_set_sched_params(unsigned int cpu) { }
++static void ksoftirqd_clr_sched_params(unsigned int cpu, bool online) { }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock per softirq
++ */
++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
++
++void __init softirq_early_init(void)
++{
++ int i;
++
++ for (i = 0; i < NR_SOFTIRQS; i++)
++ local_irq_lock_init(local_softirq_locks[i]);
++}
++
++static void lock_softirq(int which)
++{
++ local_lock(local_softirq_locks[which]);
++}
++
++static void unlock_softirq(int which)
++{
++ local_unlock(local_softirq_locks[which]);
++}
++
++static void do_single_softirq(int which, int need_rcu_bh_qs)
++{
++ unsigned long old_flags = current->flags;
++
++ current->flags &= ~PF_MEMALLOC;
++ vtime_account_irq_enter(current);
++ current->flags |= PF_IN_SOFTIRQ;
++ lockdep_softirq_enter();
++ local_irq_enable();
++ handle_softirq(which);
++ local_irq_disable();
++ lockdep_softirq_exit();
++ current->flags &= ~PF_IN_SOFTIRQ;
++ vtime_account_irq_exit(current);
++ tsk_restore_flags(current, old_flags, PF_MEMALLOC);
++}
++
++/*
++ * Called with interrupts disabled. Process softirqs which were raised
++ * in current context (or on behalf of ksoftirqd).
++ */
++static void do_current_softirqs(int need_rcu_bh_qs)
++{
++ while (current->softirqs_raised) {
++ int i = __ffs(current->softirqs_raised);
++ unsigned int pending, mask = (1U << i);
++
++ current->softirqs_raised &= ~mask;
++ local_irq_enable();
++
++ /*
++ * If the lock is contended, we boost the owner to
++ * process the softirq or leave the critical section
++ * now.
++ */
++ lock_softirq(i);
++ local_irq_disable();
++ softirq_set_runner(i);
++ /*
++ * Check with the local_softirq_pending() bits,
++ * whether we need to process this still or if someone
++ * else took care of it.
++ */
++ pending = local_softirq_pending();
++ if (pending & mask) {
++ set_softirq_pending(pending & ~mask);
++ do_single_softirq(i, need_rcu_bh_qs);
++ }
++ softirq_clr_runner(i);
++ unlock_softirq(i);
++ WARN_ON(current->softirq_nestcnt != 1);
++ }
++}
++
++static void __local_bh_disable(void)
++{
++ if (++current->softirq_nestcnt == 1)
++ migrate_disable();
++}
++
++void local_bh_disable(void)
++{
++ __local_bh_disable();
++}
++EXPORT_SYMBOL(local_bh_disable);
++
++void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_disable();
++ if (cnt & PREEMPT_CHECK_OFFSET)
++ preempt_disable();
++}
++
++static void __local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++
++ local_irq_disable();
++ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
++ do_current_softirqs(1);
++ local_irq_enable();
++
++ if (--current->softirq_nestcnt == 0)
++ migrate_enable();
++}
++
++void local_bh_enable(void)
++{
++ __local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable);
++
++extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++ __local_bh_enable();
++ if (cnt & PREEMPT_CHECK_OFFSET)
++ preempt_enable();
++}
++
++void local_bh_enable_ip(unsigned long ip)
++{
++ local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable_ip);
++
++void _local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++ if (--current->softirq_nestcnt == 0)
++ migrate_enable();
++}
++EXPORT_SYMBOL(_local_bh_enable);
++
++int in_serving_softirq(void)
++{
++ return current->flags & PF_IN_SOFTIRQ;
++}
++EXPORT_SYMBOL(in_serving_softirq);
++
++/* Called with preemption disabled */
++static void run_ksoftirqd(unsigned int cpu)
++{
++ local_irq_disable();
++ current->softirq_nestcnt++;
++
++ do_current_softirqs(1);
++ current->softirq_nestcnt--;
++ rcu_note_context_switch(cpu);
++ local_irq_enable();
++}
++
++/*
++ * Called from netif_rx_ni(). Preemption enabled, but migration
++ * disabled. So the cpu can't go away under us.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq() && current->softirqs_raised) {
++ current->softirq_nestcnt++;
++ do_current_softirqs(0);
++ current->softirq_nestcnt--;
++ }
++}
++
++static void do_raise_softirq_irqoff(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++
++ /*
++ * If we are not in a hard interrupt and inside a bh disabled
++ * region, we simply raise the flag on current. local_bh_enable()
++ * will make sure that the softirq is executed. Otherwise we
++ * delegate it to ksoftirqd.
++ */
++ if (!in_irq() && current->softirq_nestcnt)
++ current->softirqs_raised |= (1U << nr);
++ else if (__this_cpu_read(ksoftirqd))
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++ if (!in_irq() && !current->softirq_nestcnt)
++ wakeup_softirqd();
++}
++
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ do_raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in a hard interrupt we let the irq return code deal
++ * with the wakeup of ksoftirqd.
++ */
++ if (in_irq())
++ return;
++ /*
++ * If we are in thread context but outside of a bh disabled
++ * region, we need to wake ksoftirqd as well.
++ *
++ * CHECKME: Some of the places which do that could be wrapped
++ * into local_bh_disable/enable pairs. Though it's unclear
++ * whether this is worth the effort. To find those places just
++ * raise a WARN() if the condition is met.
++ */
++ if (!current->softirq_nestcnt)
++ wakeup_softirqd();
++}
++
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return current->softirqs_raised;
++}
++
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
++
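++/*
++ * On RT ksoftirqd runs as SCHED_FIFO priority 1 so raised softirqs are
++ * serviced ahead of SCHED_OTHER load, and it takes over all softirqs
++ * that were pending when it started.
++ */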
++static inline void ksoftirqd_set_sched_params(unsigned int cpu)
++{
++ struct sched_param param = { .sched_priority = 1 };
++
++ sched_setscheduler(current, SCHED_FIFO, &param);
++ /* Take over all pending softirqs when starting */
++ local_irq_disable();
++ current->softirqs_raised = local_softirq_pending();
++ local_irq_enable();
++}
++
++static inline void ksoftirqd_clr_sched_params(unsigned int cpu, bool online)
++{
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
++#endif /* PREEMPT_RT_FULL */
++/*
+ * Enter an interrupt context.
+ */
+ void irq_enter(void)
+@@ -326,9 +732,9 @@
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_irq_enter();
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
+
+ __irq_enter();
+@@ -336,6 +742,7 @@
+
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads) {
+ #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+ /*
+@@ -355,6 +762,15 @@
+ } else {
+ wakeup_softirqd();
+ }
++#else /* PREEMPT_RT_FULL */
++ unsigned long flags;
++
++ local_irq_save(flags);
++ if (__this_cpu_read(ksoftirqd) &&
++ __this_cpu_read(ksoftirqd)->softirqs_raised)
++ wakeup_softirqd();
++ local_irq_restore(flags);
++#endif
+ }
+
+ static inline void tick_irq_exit(void)
+@@ -391,26 +807,6 @@
+ trace_hardirq_exit(); /* must be last! */
+ }
+
+-/*
+- * This function must run with irqs disabled!
+- */
+-inline void raise_softirq_irqoff(unsigned int nr)
+-{
+- __raise_softirq_irqoff(nr);
+-
+- /*
+- * If we're in an interrupt or softirq, we're done
+- * (this also catches softirq-disabled code). We will
+- * actually run the softirq once we return from
+- * the irq or softirq.
+- *
+- * Otherwise we wake up ksoftirqd to make sure we
+- * schedule the softirq soon.
+- */
+- if (!in_interrupt())
+- wakeup_softirqd();
+-}
+-
+ void raise_softirq(unsigned int nr)
+ {
+ unsigned long flags;
+@@ -420,12 +816,6 @@
+ local_irq_restore(flags);
+ }
+
+-void __raise_softirq_irqoff(unsigned int nr)
+-{
+- trace_softirq_raise(nr);
+- or_softirq_pending(1UL << nr);
+-}
+-
+ void open_softirq(int nr, void (*action)(struct softirq_action *))
+ {
+ softirq_vec[nr].action = action;
+@@ -442,15 +832,45 @@
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+
++static inline void
++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
++{
++ if (tasklet_trylock(t)) {
++again:
++ /* We may have been preempted before tasklet_trylock
++ * and __tasklet_action may have already run.
++ * So double check the sched bit while the tasklet
++ * is locked before adding it to the list.
++ */
++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++ t->next = NULL;
++ *head->tail = t;
++ head->tail = &(t->next);
++ raise_softirq_irqoff(nr);
++ tasklet_unlock(t);
++ } else {
++ /* This is subtle. If we hit the corner case above,
++ * it is possible that we get preempted right here,
++ * and another task has successfully called
++ * tasklet_schedule(), then this function, and
++ * failed on the trylock. Thus we must be sure,
++ * before releasing the tasklet lock, that the
++ * SCHED bit is clear. Otherwise the tasklet
++ * may get its SCHED bit set but not be added to
++ * the list.
++ */
++ if (!tasklet_tryunlock(t))
++ goto again;
++ }
++ }
++}
++
+ void __tasklet_schedule(struct tasklet_struct *t)
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- raise_softirq_irqoff(TASKLET_SOFTIRQ);
++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(__tasklet_schedule);
+@@ -460,10 +880,7 @@
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+@@ -472,48 +889,116 @@
+ {
+ BUG_ON(!irqs_disabled());
+
+- t->next = __this_cpu_read(tasklet_hi_vec.head);
+- __this_cpu_write(tasklet_hi_vec.head, t);
+- __raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_hi_schedule(t);
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
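++/*
++ * On RT a tasklet that is disabled while scheduled is parked in
++ * TASKLET_STATE_PENDING by __tasklet_action(); re-enabling it here
++ * requeues that pending work.
++ */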
++void tasklet_enable(struct tasklet_struct *t)
+ {
+- struct tasklet_struct *list;
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_schedule(t);
++}
++EXPORT_SYMBOL(tasklet_enable);
+
+- local_irq_disable();
+- list = __this_cpu_read(tasklet_vec.head);
+- __this_cpu_write(tasklet_vec.head, NULL);
+- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
+- local_irq_enable();
++void tasklet_hi_enable(struct tasklet_struct *t)
++{
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_hi_schedule(t);
++}
++EXPORT_SYMBOL(tasklet_hi_enable);
++
++static void __tasklet_action(struct softirq_action *a,
++ struct tasklet_struct *list)
++{
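++ /* Watchdog for the unlock retry loop below: bail out if a
++ * tasklet never becomes unlockable.
++ */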
++ int loops = 1000000;
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
++ /*
++ * Should always succeed - after a tasklet got on the
++ * list (after getting the SCHED bit set from 0 to 1),
++ * nothing but the tasklet softirq it got queued to can
++ * lock it:
++ */
++ if (!tasklet_trylock(t)) {
++ WARN_ON(1);
++ continue;
+ }
+
+- local_irq_disable();
+ t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
+- local_irq_enable();
++
++ /*
++ * If we cannot handle the tasklet because it's disabled,
++ * mark it as pending. tasklet_enable() will later
++ * re-schedule the tasklet.
++ */
++ if (unlikely(atomic_read(&t->count))) {
++out_disabled:
++ /* implicit unlock: */
++ wmb();
++ t->state = TASKLET_STATEF_PENDING;
++ continue;
++ }
++
++ /*
++ * After this point on the tasklet might be rescheduled
++ * on another CPU, but it can only be added to another
++ * CPU's tasklet list if we unlock the tasklet (which we
++ * dont do yet).
++ */
++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ WARN_ON(1);
++
++again:
++ t->func(t->data);
++
++ /*
++ * Try to unlock the tasklet. We must use cmpxchg, because
++ * another CPU might have scheduled or disabled the tasklet.
++ * We only allow the STATE_RUN -> 0 transition here.
++ */
++ while (!tasklet_tryunlock(t)) {
++ /*
++ * If it got disabled meanwhile, bail out:
++ */
++ if (atomic_read(&t->count))
++ goto out_disabled;
++ /*
++ * If it got scheduled meanwhile, re-execute
++ * the tasklet function:
++ */
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ goto again;
++ if (!--loops) {
++ printk("hm, tasklet state: %08lx\n", t->state);
++ WARN_ON(1);
++ tasklet_unlock(t);
++ break;
++ }
++ }
+ }
+ }
+
++static void tasklet_action(struct softirq_action *a)
++{
++ struct tasklet_struct *list;
++
++ local_irq_disable();
++ list = __get_cpu_var(tasklet_vec).head;
++ __get_cpu_var(tasklet_vec).head = NULL;
++ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
++ local_irq_enable();
++
++ __tasklet_action(a, list);
++}
++
+ static void tasklet_hi_action(struct softirq_action *a)
+ {
+ struct tasklet_struct *list;
+@@ -524,30 +1009,7 @@
+ __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
+ local_irq_enable();
+
+- while (list) {
+- struct tasklet_struct *t = list;
+-
+- list = list->next;
+-
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
+- }
+-
+- local_irq_disable();
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- __raise_softirq_irqoff(HI_SOFTIRQ);
+- local_irq_enable();
+- }
++ __tasklet_action(a, list);
+ }
+
+ void tasklet_init(struct tasklet_struct *t,
+@@ -568,7 +1030,7 @@
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ do {
+- yield();
++ msleep(1);
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
+@@ -642,26 +1104,26 @@
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ }
+
+-static int ksoftirqd_should_run(unsigned int cpu)
+-{
+- return local_softirq_pending();
+-}
+-
+-static void run_ksoftirqd(unsigned int cpu)
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
++void tasklet_unlock_wait(struct tasklet_struct *t)
+ {
+- local_irq_disable();
+- if (local_softirq_pending()) {
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ /*
+- * We can safely run softirq on inline stack, as we are not deep
+- * in the task stack here.
++ * Hack for now to avoid this busy-loop:
+ */
+- __do_softirq();
+- rcu_note_context_switch(cpu);
+- local_irq_enable();
+- cond_resched();
+- return;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ msleep(1);
++#else
++ barrier();
++#endif
+ }
+- local_irq_enable();
++}
++EXPORT_SYMBOL(tasklet_unlock_wait);
++#endif
++
++static int ksoftirqd_should_run(unsigned int cpu)
++{
++ return ksoftirqd_softirq_pending();
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -743,6 +1205,8 @@
+
+ static struct smp_hotplug_thread softirq_threads = {
+ .store = &ksoftirqd,
++ .setup = ksoftirqd_set_sched_params,
++ .cleanup = ksoftirqd_clr_sched_params,
+ .thread_should_run = ksoftirqd_should_run,
+ .thread_fn = run_ksoftirqd,
+ .thread_comm = "ksoftirqd/%u",
+diff -Nur linux-3.18.8.orig/kernel/stop_machine.c linux-3.18.8/kernel/stop_machine.c
+--- linux-3.18.8.orig/kernel/stop_machine.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/stop_machine.c 2015-03-03 08:05:18.000000000 +0100
+@@ -30,12 +30,12 @@
+ atomic_t nr_todo; /* nr left to execute */
+ bool executed; /* actually executed? */
+ int ret; /* collected return value */
+- struct completion completion; /* fired if nr_todo reaches 0 */
++ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
+ };
+
+ /* the actual stopper, one per every possible cpu, enabled on online cpus */
+ struct cpu_stopper {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ bool enabled; /* is this stopper enabled? */
+ struct list_head works; /* list of pending works */
+ };
+@@ -56,7 +56,7 @@
+ {
+ memset(done, 0, sizeof(*done));
+ atomic_set(&done->nr_todo, nr_todo);
+- init_completion(&done->completion);
++ done->waiter = current;
+ }
+
+ /* signal completion unless @done is NULL */
+@@ -65,8 +65,10 @@
+ if (done) {
+ if (executed)
+ done->executed = true;
+- if (atomic_dec_and_test(&done->nr_todo))
+- complete(&done->completion);
++ if (atomic_dec_and_test(&done->nr_todo)) {
++ wake_up_process(done->waiter);
++ done->waiter = NULL;
++ }
+ }
+ }
+
+@@ -78,7 +80,7 @@
+
+ unsigned long flags;
+
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+
+ if (stopper->enabled) {
+ list_add_tail(&work->list, &stopper->works);
+@@ -86,7 +88,23 @@
+ } else
+ cpu_stop_signal_done(work->done, false);
+
+- spin_unlock_irqrestore(&stopper->lock, flags);
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
++}
++
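++/*
++ * Open-coded wait instead of wait_for_completion(): on RT the
++ * completion's internal lock is a sleeping spinlock, which the
++ * preemption/irq disabled stopper path must not take. The final busy
++ * wait makes sure the wakeup side is done with *done before it can go
++ * out of scope.
++ */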
++static void wait_for_stop_done(struct cpu_stop_done *done)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (atomic_read(&done->nr_todo)) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ /*
++ * We need to wait until cpu_stop_signal_done() has cleared
++ * done->waiter.
++ */
++ while (done->waiter)
++ cpu_relax();
++ set_current_state(TASK_RUNNING);
+ }
+
+ /**
+@@ -120,7 +138,7 @@
+
+ cpu_stop_init_done(&done, 1);
+ cpu_stop_queue_work(cpu, &work);
+- wait_for_completion(&done.completion);
++ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -248,7 +266,7 @@
+ struct irq_cpu_stop_queue_work_info call_args;
+ struct multi_stop_data msdata;
+
+- preempt_disable();
++ preempt_disable_nort();
+ msdata = (struct multi_stop_data){
+ .fn = fn,
+ .data = arg,
+@@ -281,7 +299,7 @@
+ * This relies on the stopper workqueues to be FIFO.
+ */
+ if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+- preempt_enable();
++ preempt_enable_nort();
+ return -ENOENT;
+ }
+
+@@ -295,9 +313,9 @@
+ &irq_cpu_stop_queue_work,
+ &call_args, 1);
+ lg_local_unlock(&stop_cpus_lock);
+- preempt_enable();
++ preempt_enable_nort();
+
+- wait_for_completion(&done.completion);
++ wait_for_stop_done(&done);
+
+ return done.executed ? done.ret : -ENOENT;
+ }
+@@ -329,7 +347,7 @@
+
+ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg,
+- struct cpu_stop_done *done)
++ struct cpu_stop_done *done, bool inactive)
+ {
+ struct cpu_stop_work *work;
+ unsigned int cpu;
+@@ -343,11 +361,13 @@
+ }
+
+ /*
+- * Disable preemption while queueing to avoid getting
+- * preempted by a stopper which might wait for other stoppers
+- * to enter @fn which can lead to deadlock.
++ * Make sure that all work is queued on all cpus before
++ * any of the cpus can execute it.
+ */
+- lg_global_lock(&stop_cpus_lock);
++ if (!inactive)
++ lg_global_lock(&stop_cpus_lock);
++ else
++ lg_global_trylock_relax(&stop_cpus_lock);
+ for_each_cpu(cpu, cpumask)
+ cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
+ lg_global_unlock(&stop_cpus_lock);
+@@ -359,8 +379,8 @@
+ struct cpu_stop_done done;
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+- queue_stop_cpus_work(cpumask, fn, arg, &done);
+- wait_for_completion(&done.completion);
++ queue_stop_cpus_work(cpumask, fn, arg, &done, false);
++ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -439,9 +459,9 @@
+ unsigned long flags;
+ int run;
+
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+ run = !list_empty(&stopper->works);
+- spin_unlock_irqrestore(&stopper->lock, flags);
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ return run;
+ }
+
+@@ -453,13 +473,13 @@
+
+ repeat:
+ work = NULL;
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ if (!list_empty(&stopper->works)) {
+ work = list_first_entry(&stopper->works,
+ struct cpu_stop_work, list);
+ list_del_init(&work->list);
+ }
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+
+ if (work) {
+ cpu_stop_fn_t fn = work->fn;
+@@ -467,6 +487,16 @@
+ struct cpu_stop_done *done = work->done;
+ char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
+
++ /*
++ * Wait until the stopper has finished scheduling on all
++ * cpus.
++ */
++ lg_global_lock(&stop_cpus_lock);
++ /*
++ * Let other cpu threads continue as well
++ */
++ lg_global_unlock(&stop_cpus_lock);
++
+ /* cpu stop callbacks are not allowed to sleep */
+ preempt_disable();
+
+@@ -481,7 +511,13 @@
+ kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+ ksym_buf), arg);
+
++ /*
++ * Make sure that the wakeup and setting done->waiter
++ * to NULL is atomic.
++ */
++ local_irq_disable();
+ cpu_stop_signal_done(done, true);
++ local_irq_enable();
+ goto repeat;
+ }
+ }
+@@ -500,20 +536,20 @@
+ unsigned long flags;
+
+ /* drain remaining works */
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+ list_for_each_entry(work, &stopper->works, list)
+ cpu_stop_signal_done(work->done, false);
+ stopper->enabled = false;
+- spin_unlock_irqrestore(&stopper->lock, flags);
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ }
+
+ static void cpu_stop_unpark(unsigned int cpu)
+ {
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ stopper->enabled = true;
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+ }
+
+ static struct smp_hotplug_thread cpu_stop_threads = {
+@@ -535,10 +571,12 @@
+ for_each_possible_cpu(cpu) {
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+- spin_lock_init(&stopper->lock);
++ raw_spin_lock_init(&stopper->lock);
+ INIT_LIST_HEAD(&stopper->works);
+ }
+
++ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
++
+ BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
+ stop_machine_initialized = true;
+ return 0;
+@@ -634,11 +672,11 @@
+ set_state(&msdata, MULTI_STOP_PREPARE);
+ cpu_stop_init_done(&done, num_active_cpus());
+ queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
+- &done);
++ &done, true);
+ ret = multi_cpu_stop(&msdata);
+
+ /* Busy wait for completion. */
+- while (!completion_done(&done.completion))
++ while (atomic_read(&done.nr_todo))
+ cpu_relax();
+
+ mutex_unlock(&stop_cpus_mutex);
+diff -Nur linux-3.18.8.orig/kernel/time/hrtimer.c linux-3.18.8/kernel/time/hrtimer.c
+--- linux-3.18.8.orig/kernel/time/hrtimer.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/hrtimer.c 2015-03-03 08:05:18.000000000 +0100
+@@ -48,11 +48,13 @@
+ #include <linux/sched/rt.h>
+ #include <linux/sched/deadline.h>
+ #include <linux/timer.h>
++#include <linux/kthread.h>
+ #include <linux/freezer.h>
+
+ #include <asm/uaccess.h>
+
+ #include <trace/events/timer.h>
++#include <trace/events/hist.h>
+
+ #include "timekeeping.h"
+
+@@ -568,8 +570,7 @@
+ * When the callback is running, we do not reprogram the clock event
+ * device. The timer callback is either running on a different CPU or
+ * the callback is executed in the hrtimer_interrupt context. The
+- * reprogramming is handled either by the softirq, which called the
+- * callback or at the end of the hrtimer_interrupt.
++ * reprogramming is handled at the end of the hrtimer_interrupt.
+ */
+ if (hrtimer_callback_running(timer))
+ return 0;
+@@ -604,6 +605,9 @@
+ return res;
+ }
+
++static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
++static int hrtimer_rt_defer(struct hrtimer *timer);
++
+ /*
+ * Initialize the high resolution related parts of cpu_base
+ */
+@@ -613,6 +617,21 @@
+ base->hres_active = 0;
+ }
+
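++/*
++ * Returns 0 if no reprogramming was needed, -ETIME if the expired
++ * timer could not be deferred and must be removed again, and 1 if the
++ * caller has to drop the base lock and raise HRTIMER_SOFTIRQ.
++ */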
++static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base,
++ int wakeup)
++{
++ if (!hrtimer_reprogram(timer, base))
++ return 0;
++ if (!wakeup)
++ return -ETIME;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ if (!hrtimer_rt_defer(timer))
++ return -ETIME;
++#endif
++ return 1;
++}
++
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ {
+ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+@@ -678,6 +697,44 @@
+
+ static DECLARE_WORK(hrtimer_work, clock_was_set_work);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT cannot call schedule_work from real interrupt context.
++ * Need to make a thread to do the real work.
++ */
++static struct task_struct *clock_set_delay_thread;
++static bool do_clock_set_delay;
++
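++/*
++ * do_clock_set_delay is set before the wakeup (ordered by smp_wmb())
++ * and rechecked after setting TASK_INTERRUPTIBLE, so a request that
++ * races with the thread going to sleep is not lost.
++ */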
++static int run_clock_set_delay(void *ignore)
++{
++ while (!kthread_should_stop()) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (do_clock_set_delay) {
++ do_clock_set_delay = false;
++ schedule_work(&hrtimer_work);
++ }
++ schedule();
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++void clock_was_set_delayed(void)
++{
++ do_clock_set_delay = true;
++ /* Make visible before waking up process */
++ smp_wmb();
++ wake_up_process(clock_set_delay_thread);
++}
++
++static __init int create_clock_set_delay_thread(void)
++{
++ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
++ BUG_ON(!clock_set_delay_thread);
++ return 0;
++}
++early_initcall(create_clock_set_delay_thread);
++#else /* PREEMPT_RT_FULL */
+ /*
+ * Called from timekeeping and resume code to reprogram the hrtimer
+ * interrupt device on all cpus.
+@@ -686,6 +743,7 @@
+ {
+ schedule_work(&hrtimer_work);
+ }
++#endif
+
+ #else
+
+@@ -694,6 +752,13 @@
+ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
++static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base,
++ int wakeup)
++{
++ return 0;
++}
++
+ static inline int hrtimer_reprogram(struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+ {
+@@ -701,7 +766,6 @@
+ }
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+ static inline void retrigger_next_event(void *arg) { }
+-
+ #endif /* CONFIG_HIGH_RES_TIMERS */
+
+ /*
+@@ -819,6 +883,32 @@
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
++
++/**
++ * hrtimer_wait_for_timer - Wait for a running timer
++ *
++ * @timer: timer to wait for
++ *
++ * The function waits on the waitqueue of the timer base in case the
++ * timer's callback function is currently executing. The
++ * waitqueue is woken up after the timer callback function has
++ * finished execution.
++ */
++void hrtimer_wait_for_timer(const struct hrtimer *timer)
++{
++ struct hrtimer_clock_base *base = timer->base;
++
++ if (base && base->cpu_base && !timer->irqsafe)
++ wait_event(base->cpu_base->wait,
++ !(timer->state & HRTIMER_STATE_CALLBACK));
++}
++
++#else
++# define wake_up_timer_waiters(b) do { } while (0)
++#endif
++
+ /*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+@@ -862,6 +952,11 @@
+ if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+ goto out;
+
++ if (unlikely(!list_empty(&timer->cb_entry))) {
++ list_del_init(&timer->cb_entry);
++ goto out;
++ }
++
+ next_timer = timerqueue_getnext(&base->active);
+ timerqueue_del(&base->active, &timer->node);
+ if (&timer->node == next_timer) {
+@@ -949,7 +1044,16 @@
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+ timer_stats_hrtimer_set_start_info(timer);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ {
++ ktime_t now = new_base->get_time();
+
++ if (ktime_to_ns(tim) < ktime_to_ns(now))
++ timer->praecox = now;
++ else
++ timer->praecox = ktime_set(0, 0);
++ }
++#endif
+ leftmost = enqueue_hrtimer(timer, new_base);
+
+ if (!leftmost) {
+@@ -963,15 +1067,26 @@
+ * on dynticks target.
+ */
+ wake_up_nohz_cpu(new_base->cpu_base->cpu);
+- } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
+- hrtimer_reprogram(timer, new_base)) {
++ } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases)) {
++
++ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++ if (ret < 0) {
++ /*
++ * In case we failed to reprogram the timer (mostly
++ * because out current timer is already elapsed),
++ * remove it again and report a failure. This avoids
++ * stale base->first entries.
++ */
++ debug_deactivate(timer);
++ __remove_hrtimer(timer, new_base,
++ timer->state & HRTIMER_STATE_CALLBACK, 0);
++ } else if (ret > 0) {
+ /*
+ * Only allow reprogramming if the new base is on this CPU.
+ * (it might still be on another CPU if the timer was pending)
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (wakeup) {
+ /*
+ * We need to drop cpu_base->lock to avoid a
+ * lock ordering issue vs. rq->lock.
+@@ -979,9 +1094,7 @@
+ raw_spin_unlock(&new_base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ local_irq_restore(flags);
+- return ret;
+- } else {
+- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ return 0;
+ }
+ }
+
+@@ -1072,7 +1185,7 @@
+
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ hrtimer_wait_for_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_cancel);
+@@ -1151,6 +1264,7 @@
+
+ base = hrtimer_clockid_to_base(clock_id);
+ timer->base = &cpu_base->clock_base[base];
++ INIT_LIST_HEAD(&timer->cb_entry);
+ timerqueue_init(&timer->node);
+
+ #ifdef CONFIG_TIMER_STATS
+@@ -1234,6 +1348,126 @@
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+ }
+
++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
++ struct hrtimer_clock_base *base)
++{
++ /*
++ * Note, we clear the callback flag before we requeue the
++ * timer otherwise we trigger the callback_running() check
++ * in hrtimer_reprogram().
++ */
++ timer->state &= ~HRTIMER_STATE_CALLBACK;
++
++ if (restart != HRTIMER_NORESTART) {
++ BUG_ON(hrtimer_active(timer));
++ /*
++ * Enqueue the timer, if it's the leftmost timer then
++ * we need to reprogram it.
++ */
++ if (!enqueue_hrtimer(timer, base))
++ return;
++
++#ifndef CONFIG_HIGH_RES_TIMERS
++ }
++#else
++ if (base->cpu_base->hres_active &&
++ hrtimer_reprogram(timer, base))
++ goto requeue;
++
++ } else if (hrtimer_active(timer)) {
++ /*
++ * If the timer was rearmed on another CPU, reprogram
++ * the event device.
++ */
++ if (&timer->node == base->active.next &&
++ base->cpu_base->hres_active &&
++ hrtimer_reprogram(timer, base))
++ goto requeue;
++ }
++ return;
++
++requeue:
++ /*
++ * Timer is expired. Thus move it from tree to pending list
++ * again.
++ */
++ __remove_hrtimer(timer, base, timer->state, 0);
++ list_add_tail(&timer->cb_entry, &base->expired);
++#endif
++}
++
++/*
++ * The changes in mainline which removed the callback modes from
++ * hrtimer are not yet working with -rt. The non-wakeup_process()
++ * based callbacks which involve sleeping locks need to be treated
++ * separately.
++ */
++static void hrtimer_rt_run_pending(void)
++{
++ enum hrtimer_restart (*fn)(struct hrtimer *);
++ struct hrtimer_cpu_base *cpu_base;
++ struct hrtimer_clock_base *base;
++ struct hrtimer *timer;
++ int index, restart;
++
++ local_irq_disable();
++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
++
++ raw_spin_lock(&cpu_base->lock);
++
++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
++ base = &cpu_base->clock_base[index];
++
++ while (!list_empty(&base->expired)) {
++ timer = list_first_entry(&base->expired,
++ struct hrtimer, cb_entry);
++
++ /*
++ * Same as the above __run_hrtimer function
++ * just we run with interrupts enabled.
++ */
++ debug_hrtimer_deactivate(timer);
++ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
++ timer_stats_account_hrtimer(timer);
++ fn = timer->function;
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++ restart = fn(timer);
++ raw_spin_lock_irq(&cpu_base->lock);
++
++ hrtimer_rt_reprogram(restart, timer, base);
++ }
++ }
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++
++ wake_up_timer_waiters(cpu_base);
++}
++
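++/*
++ * Defer a non-irqsafe timer: move it from the timerqueue to the per
++ * base expired list so hrtimer_rt_run_pending() can invoke its
++ * callback from the softirq with interrupts enabled.
++ */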
++static int hrtimer_rt_defer(struct hrtimer *timer)
++{
++ if (timer->irqsafe)
++ return 0;
++
++ __remove_hrtimer(timer, timer->base, timer->state, 0);
++ list_add_tail(&timer->cb_entry, &timer->base->expired);
++ return 1;
++}
++
++#else
++
++static inline void hrtimer_rt_run_pending(void)
++{
++ hrtimer_peek_ahead_timers();
++}
++
++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
++
++#endif
++
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
+ /*
+@@ -1244,7 +1478,7 @@
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ ktime_t expires_next, now, entry_time, delta;
+- int i, retries = 0;
++ int i, retries = 0, raise = 0;
+
+ BUG_ON(!cpu_base->hres_active);
+ cpu_base->nr_events++;
+@@ -1279,6 +1513,15 @@
+
+ timer = container_of(node, struct hrtimer, node);
+
++ trace_hrtimer_interrupt(raw_smp_processor_id(),
++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
++ timer->praecox : hrtimer_get_expires(timer),
++ basenow)),
++ current,
++ timer->function == hrtimer_wakeup ?
++ container_of(timer, struct hrtimer_sleeper,
++ timer)->task : NULL);
++
+ /*
+ * The immediate goal for using the softexpires is
+ * minimizing wakeups, not running timers at the
+@@ -1304,7 +1547,10 @@
+ break;
+ }
+
+- __run_hrtimer(timer, &basenow);
++ if (!hrtimer_rt_defer(timer))
++ __run_hrtimer(timer, &basenow);
++ else
++ raise = 1;
+ }
+ }
+
+@@ -1319,7 +1565,7 @@
+ if (expires_next.tv64 == KTIME_MAX ||
+ !tick_program_event(expires_next, 0)) {
+ cpu_base->hang_detected = 0;
+- return;
++ goto out;
+ }
+
+ /*
+@@ -1363,6 +1609,9 @@
+ tick_program_event(expires_next, 1);
+ printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
+ ktime_to_ns(delta));
++out:
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ /*
+@@ -1398,18 +1647,18 @@
+ __hrtimer_peek_ahead_timers();
+ local_irq_restore(flags);
+ }
+-
+-static void run_hrtimer_softirq(struct softirq_action *h)
+-{
+- hrtimer_peek_ahead_timers();
+-}
+-
+ #else /* CONFIG_HIGH_RES_TIMERS */
+
+ static inline void __hrtimer_peek_ahead_timers(void) { }
+
+ #endif /* !CONFIG_HIGH_RES_TIMERS */
+
++
++static void run_hrtimer_softirq(struct softirq_action *h)
++{
++ hrtimer_rt_run_pending();
++}
++
+ /*
+ * Called from timer softirq every jiffy, expire hrtimers:
+ *
+@@ -1442,7 +1691,7 @@
+ struct timerqueue_node *node;
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ struct hrtimer_clock_base *base;
+- int index, gettime = 1;
++ int index, gettime = 1, raise = 0;
+
+ if (hrtimer_hres_active())
+ return;
+@@ -1467,10 +1716,16 @@
+ hrtimer_get_expires_tv64(timer))
+ break;
+
+- __run_hrtimer(timer, &base->softirq_time);
++ if (!hrtimer_rt_defer(timer))
++ __run_hrtimer(timer, &base->softirq_time);
++ else
++ raise = 1;
+ }
+ raw_spin_unlock(&cpu_base->lock);
+ }
++
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ /*
+@@ -1492,16 +1747,18 @@
+ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
+ {
+ sl->timer.function = hrtimer_wakeup;
++ sl->timer.irqsafe = 1;
+ sl->task = task;
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+
+-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
++static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
++ unsigned long state)
+ {
+ hrtimer_init_sleeper(t, current);
+
+ do {
+- set_current_state(TASK_INTERRUPTIBLE);
++ set_current_state(state);
+ hrtimer_start_expires(&t->timer, mode);
+ if (!hrtimer_active(&t->timer))
+ t->task = NULL;
+@@ -1545,7 +1802,8 @@
+ HRTIMER_MODE_ABS);
+ hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
+
+- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
++ /* cpu_chill() does not care about restart state. */
++ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE))
+ goto out;
+
+ rmtp = restart->nanosleep.rmtp;
+@@ -1562,8 +1820,10 @@
+ return ret;
+ }
+
+-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+- const enum hrtimer_mode mode, const clockid_t clockid)
++static long
++__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
++ const enum hrtimer_mode mode, const clockid_t clockid,
++ unsigned long state)
+ {
+ struct restart_block *restart;
+ struct hrtimer_sleeper t;
+@@ -1576,7 +1836,7 @@
+
+ hrtimer_init_on_stack(&t.timer, clockid, mode);
+ hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
+- if (do_nanosleep(&t, mode))
++ if (do_nanosleep(&t, mode, state))
+ goto out;
+
+ /* Absolute timers do not update the rmtp value and restart: */
+@@ -1603,6 +1863,12 @@
+ return ret;
+ }
+
++long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
++ const enum hrtimer_mode mode, const clockid_t clockid)
++{
++ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE);
++}
++
+ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+ struct timespec __user *, rmtp)
+ {
+@@ -1617,6 +1883,26 @@
+ return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Sleep for 1 ms in the hope that whoever holds what we want will
++ * let it go. RT code uses this instead of cpu_relax() style busy
++ * retry loops, which could live-lock when the lock holder cannot run.
++ */
++void cpu_chill(void)
++{
++ struct timespec tu = {
++ .tv_nsec = NSEC_PER_MSEC,
++ };
++ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
++
++ current->flags |= PF_NOFREEZE;
++ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
++ TASK_UNINTERRUPTIBLE);
++ if (!freeze_flag)
++ current->flags &= ~PF_NOFREEZE;
++}
++EXPORT_SYMBOL(cpu_chill);
++#endif
++
+ /*
+ * Functions related to boot-time initialization:
+ */
+@@ -1628,10 +1914,14 @@
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ cpu_base->clock_base[i].cpu_base = cpu_base;
+ timerqueue_init_head(&cpu_base->clock_base[i].active);
++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
+ }
+
+ cpu_base->cpu = cpu;
+ hrtimer_init_hres(cpu_base);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ init_waitqueue_head(&cpu_base->wait);
++#endif
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -1744,9 +2034,7 @@
+ hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ register_cpu_notifier(&hrtimers_nb);
+-#ifdef CONFIG_HIGH_RES_TIMERS
+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+-#endif
+ }
+
+ /**
+diff -Nur linux-3.18.8.orig/kernel/time/itimer.c linux-3.18.8/kernel/time/itimer.c
+--- linux-3.18.8.orig/kernel/time/itimer.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/itimer.c 2015-03-03 08:05:18.000000000 +0100
+@@ -213,6 +213,7 @@
+ /* We are sharing ->siglock with it_real_fn() */
+ if (hrtimer_try_to_cancel(timer) < 0) {
+ spin_unlock_irq(&tsk->sighand->siglock);
++ hrtimer_wait_for_timer(&tsk->signal->real_timer);
+ goto again;
+ }
+ expires = timeval_to_ktime(value->it_value);
+diff -Nur linux-3.18.8.orig/kernel/time/jiffies.c linux-3.18.8/kernel/time/jiffies.c
+--- linux-3.18.8.orig/kernel/time/jiffies.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/jiffies.c 2015-03-03 08:05:18.000000000 +0100
+@@ -73,7 +73,8 @@
+ .shift = JIFFIES_SHIFT,
+ };
+
+-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock);
++__cacheline_aligned_in_smp seqcount_t jiffies_seq;
+
+ #if (BITS_PER_LONG < 64)
+ u64 get_jiffies_64(void)
+@@ -82,9 +83,9 @@
+ u64 ret;
+
+ do {
+- seq = read_seqbegin(&jiffies_lock);
++ seq = read_seqcount_begin(&jiffies_seq);
+ ret = jiffies_64;
+- } while (read_seqretry(&jiffies_lock, seq));
++ } while (read_seqcount_retry(&jiffies_seq, seq));
+ return ret;
+ }
+ EXPORT_SYMBOL(get_jiffies_64);
+diff -Nur linux-3.18.8.orig/kernel/time/ntp.c linux-3.18.8/kernel/time/ntp.c
+--- linux-3.18.8.orig/kernel/time/ntp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/ntp.c 2015-03-03 08:05:18.000000000 +0100
+@@ -10,6 +10,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/hrtimer.h>
+ #include <linux/jiffies.h>
++#include <linux/kthread.h>
+ #include <linux/math64.h>
+ #include <linux/timex.h>
+ #include <linux/time.h>
+@@ -519,10 +520,52 @@
+ &sync_cmos_work, timespec_to_jiffies(&next));
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT cannot call schedule_delayed_work from real interrupt context.
++ * Need to make a thread to do the real work.
++ */
++static struct task_struct *cmos_delay_thread;
++static bool do_cmos_delay;
++
++static int run_cmos_delay(void *ignore)
++{
++ while (!kthread_should_stop()) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ if (do_cmos_delay) {
++ do_cmos_delay = false;
++ queue_delayed_work(system_power_efficient_wq,
++ &sync_cmos_work, 0);
++ }
++ schedule();
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++void ntp_notify_cmos_timer(void)
++{
++ do_cmos_delay = true;
++ /* Make visible before waking up process */
++ smp_wmb();
++ wake_up_process(cmos_delay_thread);
++}
++
++static __init int create_cmos_delay_thread(void)
++{
++ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd");
++ BUG_ON(!cmos_delay_thread);
++ return 0;
++}
++early_initcall(create_cmos_delay_thread);
++
++#else
++
+ void ntp_notify_cmos_timer(void)
+ {
+ queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
+ }
++#endif /* CONFIG_PREEMPT_RT_FULL */
+
+ #else
+ void ntp_notify_cmos_timer(void) { }
+diff -Nur linux-3.18.8.orig/kernel/time/posix-cpu-timers.c linux-3.18.8/kernel/time/posix-cpu-timers.c
+--- linux-3.18.8.orig/kernel/time/posix-cpu-timers.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/posix-cpu-timers.c 2015-03-03 08:05:18.000000000 +0100
+@@ -3,6 +3,7 @@
+ */
+
+ #include <linux/sched.h>
++#include <linux/sched/rt.h>
+ #include <linux/posix-timers.h>
+ #include <linux/errno.h>
+ #include <linux/math64.h>
+@@ -626,7 +627,7 @@
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -1047,7 +1048,7 @@
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ unlock_task_sighand(p, &flags);
+
+@@ -1113,10 +1114,11 @@
+ sig = tsk->signal;
+ if (sig->cputimer.running) {
+ struct task_cputime group_sample;
++ unsigned long flags;
+
+- raw_spin_lock(&sig->cputimer.lock);
++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ group_sample = sig->cputimer.cputime;
+- raw_spin_unlock(&sig->cputimer.lock);
++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+@@ -1130,13 +1132,13 @@
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
+
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1194,6 +1196,190 @@
+ }
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
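++/*
++ * On RT the per-task expiry work runs in a per-CPU SCHED_FIFO thread.
++ * Tasks with expired timers are chained through
++ * task_struct::posix_timer_list; the chain is terminated by a task
++ * pointing at itself.
++ */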
++static int posix_cpu_timers_thread(void *data)
++{
++ int cpu = (long)data;
++
++ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
++
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
++
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
++
++ /* it's possible the list is empty, just return */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
++
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
++
++ /* run the task's timers, clear its list
++ * pointer and drop our reference
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
++
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
++
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if (!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++ }
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we start the per-CPU posix timer thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posixcputmr/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unnecessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task, cpu),
++ cpumask_any(cpu_online_mask));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_possible_cpu(cpu)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
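One detail of the hunk above deserves a note: the per-CPU task list is an intrusive singly linked list whose tail element points to itself instead of to NULL, so that a NULL ->posix_timer_list can unambiguously mean "not queued". A minimal standalone sketch of that convention (hypothetical node type, not the kernel structures):

    /* Self-terminated intrusive list: NULL means "not queued";
     * the tail points to itself rather than to NULL. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
        struct node *next;          /* NULL = not on any list */
        int id;
    };

    static struct node *head;       /* per-CPU head in the real code */

    static void push(struct node *n)
    {
        assert(n->next == NULL);    /* must not be queued already */
        n->next = head ? head : n;  /* empty list: point to self */
        head = n;
    }

    static void drain(void (*fn)(struct node *))
    {
        struct node *n = head, *next;

        head = NULL;                /* done with irqs off in the kernel */
        while (n) {
            next = n->next;         /* save before clearing */
            n->next = NULL;         /* mark "not queued" again */
            fn(n);
            if (next == n)          /* self-pointer terminates the walk */
                break;
            n = next;
        }
    }

    static void show(struct node *n) { printf("node %d\n", n->id); }

    int main(void)
    {
        struct node a = { .id = 1 }, b = { .id = 2 };

        push(&a);
        push(&b);
        drain(show);                /* prints node 2, then node 1 */
        return 0;
    }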
+diff -Nur linux-3.18.8.orig/kernel/time/posix-timers.c linux-3.18.8/kernel/time/posix-timers.c
+--- linux-3.18.8.orig/kernel/time/posix-timers.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/posix-timers.c 2015-03-03 08:05:18.000000000 +0100
+@@ -499,6 +499,7 @@
+ static struct pid *good_sigevent(sigevent_t * event)
+ {
+ struct task_struct *rtn = current->group_leader;
++ int sig = event->sigev_signo;
+
+ if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+ (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+@@ -507,7 +508,8 @@
+ return NULL;
+
+ if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
++ sig_kernel_coredump(sig)))
+ return NULL;
+
+ return task_pid(rtn);
+@@ -819,6 +821,20 @@
+ return overrun;
+ }
+
++/*
++ * Protected by RCU!
++ */
++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (kc->timer_set == common_timer_set)
++ hrtimer_wait_for_timer(&timr->it.real.timer);
++ else
++ /* FIXME: Whacky hack for posix-cpu-timers */
++ schedule_timeout(1);
++#endif
++}
++
+ /* Set a POSIX.1b interval timer. */
+ /* timr->it_lock is taken. */
+ static int
+@@ -896,6 +912,7 @@
+ if (!timr)
+ return -EINVAL;
+
++ rcu_read_lock();
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ error = -EINVAL;
+@@ -904,9 +921,12 @@
+
+ unlock_timer(timr, flag);
+ if (error == TIMER_RETRY) {
++ timer_wait_for_callback(kc, timr);
+ rtn = NULL; // We already got the old time...
++ rcu_read_unlock();
+ goto retry;
+ }
++ rcu_read_unlock();
+
+ if (old_setting && !error &&
+ copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+@@ -944,10 +964,15 @@
+ if (!timer)
+ return -EINVAL;
+
++ rcu_read_lock();
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
++ rcu_read_unlock();
+
+ spin_lock(&current->sighand->siglock);
+ list_del(&timer->list);
+@@ -973,8 +998,18 @@
+ retry_delete:
+ spin_lock_irqsave(&timer->it_lock, flags);
+
++ /* On RT we can race with a deletion */
++ if (!timer->it_signal) {
++ unlock_timer(timer, flags);
++ return;
++ }
++
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
++ rcu_read_lock();
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
+ list_del(&timer->list);
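The retry loops in this hunk follow a recognizable shape for deleting an object whose callback may be running right now: take the lock, try to delete, and on TIMER_RETRY drop the lock, wait for the callback to finish, then retry; the added rcu_read_lock() apparently keeps the k_itimer safe to reference across the unlocked wait. A hedged pthread sketch of that shape, with invented names (tmr, callback_running, delete_sync):

    /* Sketch of "unlock, wait for the running callback, retry the
     * delete". Not the kernel code; names are invented. */
    #include <pthread.h>
    #include <stdbool.h>

    struct tmr {
        pthread_mutex_t lock;
        pthread_cond_t idle;        /* signalled when the callback ends */
        bool callback_running;
        bool queued;
    };

    /* The timer machinery wraps the user callback like this. */
    static void run_callback(struct tmr *t, void (*cb)(void))
    {
        pthread_mutex_lock(&t->lock);
        t->callback_running = true;
        pthread_mutex_unlock(&t->lock);

        cb();                       /* runs unlocked, as in the kernel */

        pthread_mutex_lock(&t->lock);
        t->callback_running = false;
        pthread_cond_broadcast(&t->idle);
        pthread_mutex_unlock(&t->lock);
    }

    /* Synchronous delete: returns only once no callback is running. */
    static void delete_sync(struct tmr *t)
    {
        pthread_mutex_lock(&t->lock);
        while (t->callback_running)     /* the TIMER_RETRY case */
            pthread_cond_wait(&t->idle, &t->lock);
        t->queued = false;              /* delete succeeded */
        pthread_mutex_unlock(&t->lock);
    }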
+diff -Nur linux-3.18.8.orig/kernel/time/tick-common.c linux-3.18.8/kernel/time/tick-common.c
+--- linux-3.18.8.orig/kernel/time/tick-common.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/tick-common.c 2015-03-03 08:05:18.000000000 +0100
+@@ -78,13 +78,15 @@
+ static void tick_periodic(int cpu)
+ {
+ if (tick_do_timer_cpu == cpu) {
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+
+ /* Keep track of the next tick event */
+ tick_next_period = ktime_add(tick_next_period, tick_period);
+
+ do_timer(1);
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+
+@@ -146,9 +148,9 @@
+ ktime_t next;
+
+ do {
+- seq = read_seqbegin(&jiffies_lock);
++ seq = read_seqcount_begin(&jiffies_seq);
+ next = tick_next_period;
+- } while (read_seqretry(&jiffies_lock, seq));
++ } while (read_seqcount_retry(&jiffies_seq, seq));
+
+ clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+
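The jiffies_lock conversion repeated through these hunks splits one seqlock into a raw spinlock (writer serialization that remains a true spinning lock on PREEMPT_RT) plus a seqcount that lock-free readers retry against; the read side keeps its familiar do/while retry loop. A compact C11 sketch of the seqcount protocol itself, hedged as illustrative (it uses seq_cst atomics and plain data accesses for brevity, which a strictly conforming implementation would tighten up):

    /* Minimal seqcount: the writer makes the counter odd while
     * updating; readers retry if it was odd or changed underneath. */
    #include <stdatomic.h>

    static _Atomic unsigned seq;
    static unsigned long a, b;      /* the protected data */

    static void write_update(unsigned long x)
    {
        /* Writers must already be serialized (the raw spinlock). */
        atomic_fetch_add(&seq, 1);  /* odd: update in progress */
        a = x;
        b = x + 1;
        atomic_fetch_add(&seq, 1);  /* even again: update done */
    }

    static unsigned long read_consistent(void)
    {
        unsigned s;
        unsigned long ra, rb;

        do {
            s = atomic_load(&seq);
            ra = a;
            rb = b;
        } while ((s & 1) || s != atomic_load(&seq));
        return ra + rb;
    }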
+diff -Nur linux-3.18.8.orig/kernel/time/tick-internal.h linux-3.18.8/kernel/time/tick-internal.h
+--- linux-3.18.8.orig/kernel/time/tick-internal.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/tick-internal.h 2015-03-03 08:05:18.000000000 +0100
+@@ -6,7 +6,8 @@
+
+ #include "timekeeping.h"
+
+-extern seqlock_t jiffies_lock;
++extern raw_spinlock_t jiffies_lock;
++extern seqcount_t jiffies_seq;
+
+ #define CS_NAME_LEN 32
+
+diff -Nur linux-3.18.8.orig/kernel/time/tick-sched.c linux-3.18.8/kernel/time/tick-sched.c
+--- linux-3.18.8.orig/kernel/time/tick-sched.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/tick-sched.c 2015-03-03 08:05:18.000000000 +0100
+@@ -62,7 +62,8 @@
+ return;
+
+ /* Reevalute with jiffies_lock held */
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+
+ delta = ktime_sub(now, last_jiffies_update);
+ if (delta.tv64 >= tick_period.tv64) {
+@@ -85,10 +86,12 @@
+ /* Keep the tick_next_period variable up to date */
+ tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ } else {
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ return;
+ }
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+
+@@ -99,12 +102,14 @@
+ {
+ ktime_t period;
+
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+ /* Did we start the jiffies update yet ? */
+ if (last_jiffies_update.tv64 == 0)
+ last_jiffies_update = tick_next_period;
+ period = last_jiffies_update;
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ return period;
+ }
+
+@@ -222,6 +227,7 @@
+
+ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+ .func = nohz_full_kick_work_func,
++ .flags = IRQ_WORK_HARD_IRQ,
+ };
+
+ /*
+@@ -580,10 +586,10 @@
+
+ /* Read jiffies and the time when jiffies were updated last */
+ do {
+- seq = read_seqbegin(&jiffies_lock);
++ seq = read_seqcount_begin(&jiffies_seq);
+ last_update = last_jiffies_update;
+ last_jiffies = jiffies;
+- } while (read_seqretry(&jiffies_lock, seq));
++ } while (read_seqcount_retry(&jiffies_seq, seq));
+
+ if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+ arch_needs_cpu() || irq_work_needs_cpu()) {
+@@ -761,14 +767,7 @@
+ return false;
+
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+- static int ratelimit;
+-
+- if (ratelimit < 10 &&
+- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+- pr_warn("NOHZ: local_softirq_pending %02x\n",
+- (unsigned int) local_softirq_pending());
+- ratelimit++;
+- }
++ softirq_check_pending_idle();
+ return false;
+ }
+
+@@ -1156,6 +1155,7 @@
+ * Emulate tick processing via per-CPU hrtimers:
+ */
+ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ ts->sched_timer.irqsafe = 1;
+ ts->sched_timer.function = tick_sched_timer;
+
+ /* Get the next period (per cpu) */
+diff -Nur linux-3.18.8.orig/kernel/time/timekeeping.c linux-3.18.8/kernel/time/timekeeping.c
+--- linux-3.18.8.orig/kernel/time/timekeeping.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/timekeeping.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1814,8 +1814,10 @@
+ */
+ void xtime_update(unsigned long ticks)
+ {
+- write_seqlock(&jiffies_lock);
++ raw_spin_lock(&jiffies_lock);
++ write_seqcount_begin(&jiffies_seq);
+ do_timer(ticks);
+- write_sequnlock(&jiffies_lock);
++ write_seqcount_end(&jiffies_seq);
++ raw_spin_unlock(&jiffies_lock);
+ update_wall_time();
+ }
+diff -Nur linux-3.18.8.orig/kernel/time/timer.c linux-3.18.8/kernel/time/timer.c
+--- linux-3.18.8.orig/kernel/time/timer.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/time/timer.c 2015-03-03 08:05:18.000000000 +0100
+@@ -78,6 +78,9 @@
+ struct tvec_base {
+ spinlock_t lock;
+ struct timer_list *running_timer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ wait_queue_head_t wait_for_running_timer;
++#endif
+ unsigned long timer_jiffies;
+ unsigned long next_timer;
+ unsigned long active_timers;
+@@ -758,6 +761,36 @@
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++ struct tvec_base *old,
++ struct tvec_base *new)
++{
++ /* See the comment in lock_timer_base() */
++ timer_set_base(timer, NULL);
++ spin_unlock(&old->lock);
++ spin_lock(&new->lock);
++ timer_set_base(timer, new);
++ return new;
++}
++#else
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++ struct tvec_base *old,
++ struct tvec_base *new)
++{
++ /*
++ * We cannot do the above because we might be preempted and
++ * then the preempter would see NULL and loop forever.
++ */
++ if (spin_trylock(&new->lock)) {
++ timer_set_base(timer, new);
++ spin_unlock(&old->lock);
++ return new;
++ }
++ return old;
++}
++#endif
++
+ static inline int
+ __mod_timer(struct timer_list *timer, unsigned long expires,
+ bool pending_only, int pinned)
+@@ -788,14 +821,8 @@
+ * handler yet has not finished. This also guarantees that
+ * the timer is serialized wrt itself.
+ */
+- if (likely(base->running_timer != timer)) {
+- /* See the comment in lock_timer_base() */
+- timer_set_base(timer, NULL);
+- spin_unlock(&base->lock);
+- base = new_base;
+- spin_lock(&base->lock);
+- timer_set_base(timer, base);
+- }
++ if (likely(base->running_timer != timer))
++ base = switch_timer_base(timer, base, new_base);
+ }
+
+ timer->expires = expires;
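The comment in switch_timer_base() above is the heart of this hunk: the mainline trick of parking the timer on a NULL base while swapping locks would livelock on RT, because a preempted writer leaves every concurrent locker spinning on NULL. The RT variant therefore migrates only when the new base's lock can be taken without blocking, and otherwise leaves the timer where it is. The general shape, hedged as a pthread sketch with invented names:

    /* Move an item from base "from" (whose lock is held) to base "to"
     * only if "to" can be locked without blocking; never block while
     * holding from->lock. Illustrative only. */
    #include <pthread.h>

    struct base {
        pthread_mutex_t lock;
        /* ... per-base timer wheel ... */
    };

    struct item {
        struct base *base;
    };

    /* Caller holds from->lock; returns whichever base is locked. */
    static struct base *switch_base(struct item *it,
                                    struct base *from, struct base *to)
    {
        if (pthread_mutex_trylock(&to->lock) == 0) {
            it->base = to;
            pthread_mutex_unlock(&from->lock);
            return to;
        }
        return from;    /* contended: keep the item on its old base */
    }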
+@@ -969,6 +996,29 @@
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Wait for a running timer
++ */
++static void wait_for_running_timer(struct timer_list *timer)
++{
++ struct tvec_base *base = timer->base;
++
++ if (base->running_timer == timer)
++ wait_event(base->wait_for_running_timer,
++ base->running_timer != timer);
++}
++
++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
++#else
++static inline void wait_for_running_timer(struct timer_list *timer)
++{
++ cpu_relax();
++}
++
++# define wakeup_timer_waiters(b) do { } while (0)
++#endif
++
+ /**
+ * del_timer - deactivate a timer.
+ * @timer: the timer to be deactivated
+@@ -1026,7 +1076,7 @@
+ }
+ EXPORT_SYMBOL(try_to_del_timer_sync);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ /**
+ * del_timer_sync - deactivate a timer and wait for the handler to finish.
+ * @timer: the timer to be deactivated
+@@ -1086,7 +1136,7 @@
+ int ret = try_to_del_timer_sync(timer);
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ wait_for_running_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+@@ -1207,15 +1257,17 @@
+ if (irqsafe) {
+ spin_unlock(&base->lock);
+ call_timer_fn(timer, fn, data);
++ base->running_timer = NULL;
+ spin_lock(&base->lock);
+ } else {
+ spin_unlock_irq(&base->lock);
+ call_timer_fn(timer, fn, data);
++ base->running_timer = NULL;
+ spin_lock_irq(&base->lock);
+ }
+ }
+ }
+- base->running_timer = NULL;
++ wakeup_timer_waiters(base);
+ spin_unlock_irq(&base->lock);
+ }
+
+@@ -1355,17 +1407,31 @@
+ if (cpu_is_offline(smp_processor_id()))
+ return expires;
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * On PREEMPT_RT we cannot sleep here. If the trylock does not
++ * succeed then we return the worst-case 'expires in 1 tick'
++ * value. We use the rt functions here directly to avoid a
++ * migrate_disable() call.
++ */
++ if (!spin_do_trylock(&base->lock))
++ return now + 1;
++#else
+ spin_lock(&base->lock);
++#endif
+ if (base->active_timers) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+ }
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rt_spin_unlock_after_trylock_in_irq(&base->lock);
++#else
+ spin_unlock(&base->lock);
++#endif
+
+ if (time_before_eq(expires, now))
+ return now;
+-
+ return cmp_next_hrtimer_event(now, expires);
+ }
+ #endif
+@@ -1381,13 +1447,13 @@
+
+ /* Note: this timer irq context must be accounted for as well. */
+ account_process_tick(p, user_tick);
++ scheduler_tick();
+ run_local_timers();
+ rcu_check_callbacks(cpu, user_tick);
+ #ifdef CONFIG_IRQ_WORK
+ if (in_irq())
+ irq_work_tick();
+ #endif
+- scheduler_tick();
+ run_posix_cpu_timers(p);
+ }
+
+@@ -1400,6 +1466,10 @@
+
+ hrtimer_run_pending();
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++ irq_work_tick();
++#endif
++
+ if (time_after_eq(jiffies, base->timer_jiffies))
+ __run_timers(base);
+ }
+@@ -1574,6 +1644,9 @@
+ base = per_cpu(tvec_bases, cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ init_waitqueue_head(&base->wait_for_running_timer);
++#endif
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
+@@ -1613,7 +1686,7 @@
+
+ BUG_ON(cpu_online(cpu));
+ old_base = per_cpu(tvec_bases, cpu);
+- new_base = get_cpu_var(tvec_bases);
++ new_base = get_local_var(tvec_bases);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+@@ -1634,7 +1707,7 @@
+
+ spin_unlock(&old_base->lock);
+ spin_unlock_irq(&new_base->lock);
+- put_cpu_var(tvec_bases);
++ put_local_var(tvec_bases);
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+diff -Nur linux-3.18.8.orig/kernel/trace/Kconfig linux-3.18.8/kernel/trace/Kconfig
+--- linux-3.18.8.orig/kernel/trace/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -187,6 +187,24 @@
+ enabled. This option and the preempt-off timing option can be
+ used together or separately.)
+
++config INTERRUPT_OFF_HIST
++ bool "Interrupts-off Latency Histogram"
++ depends on IRQSOFF_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the duration of time periods with interrupts disabled. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++ If PREEMPT_OFF_HIST is also selected, additional histograms (one
++ per cpu) are generated that accumulate the duration of time periods
++ when both interrupts and preemption are disabled. The histogram data
++ will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/irqsoff
++
+ config PREEMPT_TRACER
+ bool "Preemption-off Latency Tracer"
+ default n
+@@ -211,6 +229,24 @@
+ enabled. This option and the irqs-off timing option can be
+ used together or separately.)
+
++config PREEMPT_OFF_HIST
++ bool "Preemption-off Latency Histogram"
++ depends on PREEMPT_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the duration of time periods with preemption disabled. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
++ per cpu) are generated that accumulate the duration of time periods
++ when both interrupts and preemption are disabled. The histogram data
++ will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/preemptoff
++
+ config SCHED_TRACER
+ bool "Scheduling Latency Tracer"
+ select GENERIC_TRACER
+@@ -221,6 +257,74 @@
+ This tracer tracks the latency of the highest priority task
+ to be scheduled in, starting from the point it has woken up.
+
++config WAKEUP_LATENCY_HIST
++ bool "Scheduling Latency Histogram"
++ depends on SCHED_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the scheduling latency of the highest priority task.
++ The histograms are disabled by default. To enable them, write a
++ non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup
++
++ Two different algorithms are used, one to determine the latency of
++ processes that exclusively use the highest priority of the system and
++ another one to determine the latency of processes that share the
++ highest system priority with other processes. The former is used to
++ improve hardware and system software, the latter to optimize the
++ priority design of a given system. The histogram data will be
++ located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/wakeup
++
++ and
++
++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
++
++ If both Scheduling Latency Histogram and Missed Timer Offsets
++ Histogram are selected, additional histogram data will be collected
++ that contain, in addition to the wakeup latency, the timer latency, in
++ case the wakeup was triggered by an expired timer. These histograms
++ are available in the
++
++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++ directory. They reflect the apparent interrupt and scheduling latency
++ and are best suited to determining the worst-case latency of a given
++ system. To enable these histograms, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
++config MISSED_TIMER_OFFSETS_HIST
++ depends on HIGH_RES_TIMERS
++ select GENERIC_TRACER
++ bool "Missed Timer Offsets Histogram"
++ help
++ Generate a histogram of missed timer offsets in microseconds. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
++
++ The histogram data will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
++
++ If both Scheduling Latency Histogram and Missed Timer Offsets
++ Histogram are selected, additional histogram data will be collected
++ that contain, in addition to the wakeup latency, the timer latency, in
++ case the wakeup was triggered by an expired timer. These histograms
++ are available in the
++
++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++ directory. They reflect the apparent interrupt and scheduling latency
++ and are best suited to determining the worst-case latency of a given
++ system. To enable these histograms, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
+ config ENABLE_DEFAULT_TRACERS
+ bool "Trace process context switches and events"
+ depends on !GENERIC_TRACER
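All four help texts above share the same control scheme: each histogram set is compiled in by its config option but stays inert until a non-zero value is written to its file under /sys/kernel/debug/tracing/latency_hist/enable/. From a shell that is just "echo 1 > .../enable/wakeup"; the equivalent from C, assuming debugfs is mounted at the usual path and the corresponding option was selected, is a sketch like:

    /* Enable the wakeup latency histogram. Assumes debugfs is mounted
     * at /sys/kernel/debug and CONFIG_WAKEUP_LATENCY_HIST=y. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/kernel/debug/tracing/latency_hist/enable/wakeup";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror(path);
            return 1;
        }
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }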
+diff -Nur linux-3.18.8.orig/kernel/trace/latency_hist.c linux-3.18.8/kernel/trace/latency_hist.c
+--- linux-3.18.8.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.18.8/kernel/trace/latency_hist.c 2015-03-03 08:05:18.000000000 +0100
+@@ -0,0 +1,1178 @@
++/*
++ * kernel/trace/latency_hist.c
++ *
++ * Add support for histograms of preemption-off latency,
++ * interrupt-off latency, and wakeup latency; it depends on
++ * Real-Time Preemption support.
++ *
++ * Copyright (C) 2005 MontaVista Software, Inc.
++ * Yi Yang <yyang@ch.mvista.com>
++ *
++ * Converted to work with the new latency tracer.
++ * Copyright (C) 2008 Red Hat, Inc.
++ * Steven Rostedt <srostedt@redhat.com>
++ *
++ */
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/percpu.h>
++#include <linux/kallsyms.h>
++#include <linux/uaccess.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/slab.h>
++#include <linux/atomic.h>
++#include <asm/div64.h>
++
++#include "trace.h"
++#include <trace/events/sched.h>
++
++#define NSECS_PER_USECS 1000L
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/hist.h>
++
++enum {
++ IRQSOFF_LATENCY = 0,
++ PREEMPTOFF_LATENCY,
++ PREEMPTIRQSOFF_LATENCY,
++ WAKEUP_LATENCY,
++ WAKEUP_LATENCY_SHAREDPRIO,
++ MISSED_TIMER_OFFSETS,
++ TIMERANDWAKEUP_LATENCY,
++ MAX_LATENCY_TYPE,
++};
++
++#define MAX_ENTRY_NUM 10240
++
++struct hist_data {
++ atomic_t hist_mode; /* 0 don't log, 1 log */
++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
++ long min_lat;
++ long max_lat;
++ unsigned long long below_hist_bound_samples;
++ unsigned long long above_hist_bound_samples;
++ long long accumulate_lat;
++ unsigned long long total_samples;
++ unsigned long long hist_array[MAX_ENTRY_NUM];
++};
++
++struct enable_data {
++ int latency_type;
++ int enabled;
++};
++
++static char *latency_hist_dir_root = "latency_hist";
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
++static char *irqsoff_hist_dir = "irqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
++static DEFINE_PER_CPU(int, hist_irqsoff_counting);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
++static char *preemptoff_hist_dir = "preemptoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
++static DEFINE_PER_CPU(int, hist_preemptoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
++static char *preemptirqsoff_hist_dir = "preemptirqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
++static struct enable_data preemptirqsoff_enabled_data = {
++ .latency_type = PREEMPTIRQSOFF_LATENCY,
++ .enabled = 0,
++};
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++struct maxlatproc_data {
++ char comm[FIELD_SIZEOF(struct task_struct, comm)];
++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
++ int pid;
++ int current_pid;
++ int prio;
++ int current_prio;
++ long latency;
++ long timeroffset;
++ cycle_t timestamp;
++};
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
++static char *wakeup_latency_hist_dir = "wakeup";
++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
++static notrace void probe_wakeup_latency_hist_start(void *v,
++ struct task_struct *p, int success);
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++ struct task_struct *prev, struct task_struct *next);
++static notrace void probe_sched_migrate_task(void *,
++ struct task_struct *task, int cpu);
++static struct enable_data wakeup_latency_enabled_data = {
++ .latency_type = WAKEUP_LATENCY,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
++static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
++static DEFINE_PER_CPU(int, wakeup_sharedprio);
++static unsigned long wakeup_pid;
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
++static char *missed_timer_offsets_dir = "missed_timer_offsets";
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++ long long offset, struct task_struct *curr, struct task_struct *task);
++static struct enable_data missed_timer_offsets_enabled_data = {
++ .latency_type = MISSED_TIMER_OFFSETS,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
++static unsigned long missed_timer_offsets_pid;
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
++static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
++static struct enable_data timerandwakeup_enabled_data = {
++ .latency_type = TIMERANDWAKEUP_LATENCY,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
++#endif
++
++void notrace latency_hist(int latency_type, int cpu, long latency,
++ long timeroffset, cycle_t stop,
++ struct task_struct *p)
++{
++ struct hist_data *my_hist;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ struct maxlatproc_data *mp = NULL;
++#endif
++
++ if (!cpu_possible(cpu) || latency_type < 0 ||
++ latency_type >= MAX_LATENCY_TYPE)
++ return;
++
++ switch (latency_type) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ case IRQSOFF_LATENCY:
++ my_hist = &per_cpu(irqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ case PREEMPTOFF_LATENCY:
++ my_hist = &per_cpu(preemptoff_hist, cpu);
++ break;
++#endif
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ my_hist = &per_cpu(preemptirqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ my_hist = &per_cpu(wakeup_latency_hist, cpu);
++ mp = &per_cpu(wakeup_maxlatproc, cpu);
++ break;
++ case WAKEUP_LATENCY_SHAREDPRIO:
++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ my_hist = &per_cpu(missed_timer_offsets, cpu);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++ break;
++#endif
++
++ default:
++ return;
++ }
++
++ latency += my_hist->offset;
++
++ if (atomic_read(&my_hist->hist_mode) == 0)
++ return;
++
++ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
++ if (latency < 0)
++ my_hist->below_hist_bound_samples++;
++ else
++ my_hist->above_hist_bound_samples++;
++ } else
++ my_hist->hist_array[latency]++;
++
++ if (unlikely(latency > my_hist->max_lat ||
++ my_hist->min_lat == LONG_MAX)) {
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ if (latency_type == WAKEUP_LATENCY ||
++ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++ latency_type == MISSED_TIMER_OFFSETS ||
++ latency_type == TIMERANDWAKEUP_LATENCY) {
++ strncpy(mp->comm, p->comm, sizeof(mp->comm));
++ strncpy(mp->current_comm, current->comm,
++ sizeof(mp->current_comm));
++ mp->pid = task_pid_nr(p);
++ mp->current_pid = task_pid_nr(current);
++ mp->prio = p->prio;
++ mp->current_prio = current->prio;
++ mp->latency = latency;
++ mp->timeroffset = timeroffset;
++ mp->timestamp = stop;
++ }
++#endif
++ my_hist->max_lat = latency;
++ }
++ if (unlikely(latency < my_hist->min_lat))
++ my_hist->min_lat = latency;
++ my_hist->total_samples++;
++ my_hist->accumulate_lat += latency;
++}
++
++static void *l_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t *index_ptr = NULL;
++ loff_t index = *pos;
++ struct hist_data *my_hist = m->private;
++
++ if (index == 0) {
++ char minstr[32], avgstr[32], maxstr[32];
++
++ atomic_dec(&my_hist->hist_mode);
++
++ if (likely(my_hist->total_samples)) {
++ long avg = (long) div64_s64(my_hist->accumulate_lat,
++ my_hist->total_samples);
++ snprintf(minstr, sizeof(minstr), "%ld",
++ my_hist->min_lat - my_hist->offset);
++ snprintf(avgstr, sizeof(avgstr), "%ld",
++ avg - my_hist->offset);
++ snprintf(maxstr, sizeof(maxstr), "%ld",
++ my_hist->max_lat - my_hist->offset);
++ } else {
++ strcpy(minstr, "<undef>");
++ strcpy(avgstr, minstr);
++ strcpy(maxstr, minstr);
++ }
++
++ seq_printf(m, "#Minimum latency: %s microseconds\n"
++ "#Average latency: %s microseconds\n"
++ "#Maximum latency: %s microseconds\n"
++ "#Total samples: %llu\n"
++ "#There are %llu samples lower than %ld"
++ " microseconds.\n"
++ "#There are %llu samples greater or equal"
++ " than %ld microseconds.\n"
++ "#usecs\t%16s\n",
++ minstr, avgstr, maxstr,
++ my_hist->total_samples,
++ my_hist->below_hist_bound_samples,
++ -my_hist->offset,
++ my_hist->above_hist_bound_samples,
++ MAX_ENTRY_NUM - my_hist->offset,
++ "samples");
++ }
++ if (index < MAX_ENTRY_NUM) {
++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
++ if (index_ptr)
++ *index_ptr = index;
++ }
++
++ return index_ptr;
++}
++
++static void *l_next(struct seq_file *m, void *p, loff_t *pos)
++{
++ loff_t *index_ptr = p;
++ struct hist_data *my_hist = m->private;
++
++ if (++*pos >= MAX_ENTRY_NUM) {
++ atomic_inc(&my_hist->hist_mode);
++ return NULL;
++ }
++ *index_ptr = *pos;
++ return index_ptr;
++}
++
++static void l_stop(struct seq_file *m, void *p)
++{
++ kfree(p);
++}
++
++static int l_show(struct seq_file *m, void *p)
++{
++ int index = *(loff_t *) p;
++ struct hist_data *my_hist = m->private;
++
++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
++ my_hist->hist_array[index]);
++ return 0;
++}
++
++static const struct seq_operations latency_hist_seq_op = {
++ .start = l_start,
++ .next = l_next,
++ .stop = l_stop,
++ .show = l_show
++};
++
++static int latency_hist_open(struct inode *inode, struct file *file)
++{
++ int ret;
++
++ ret = seq_open(file, &latency_hist_seq_op);
++ if (!ret) {
++ struct seq_file *seq = file->private_data;
++ seq->private = inode->i_private;
++ }
++ return ret;
++}
++
++static const struct file_operations latency_hist_fops = {
++ .open = latency_hist_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static void clear_maxlatprocdata(struct maxlatproc_data *mp)
++{
++ mp->comm[0] = mp->current_comm[0] = '\0';
++ mp->prio = mp->current_prio = mp->pid = mp->current_pid =
++ mp->latency = mp->timeroffset = -1;
++ mp->timestamp = 0;
++}
++#endif
++
++static void hist_reset(struct hist_data *hist)
++{
++ atomic_dec(&hist->hist_mode);
++
++ memset(hist->hist_array, 0, sizeof(hist->hist_array));
++ hist->below_hist_bound_samples = 0ULL;
++ hist->above_hist_bound_samples = 0ULL;
++ hist->min_lat = LONG_MAX;
++ hist->max_lat = LONG_MIN;
++ hist->total_samples = 0ULL;
++ hist->accumulate_lat = 0LL;
++
++ atomic_inc(&hist->hist_mode);
++}
++
++static ssize_t
++latency_hist_reset(struct file *file, const char __user *a,
++ size_t size, loff_t *off)
++{
++ int cpu;
++ struct hist_data *hist = NULL;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ struct maxlatproc_data *mp = NULL;
++#endif
++ off_t latency_type = (off_t) file->private_data;
++
++ for_each_online_cpu(cpu) {
++
++ switch (latency_type) {
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ case PREEMPTOFF_LATENCY:
++ hist = &per_cpu(preemptoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ case IRQSOFF_LATENCY:
++ hist = &per_cpu(irqsoff_hist, cpu);
++ break;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ hist = &per_cpu(preemptirqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ hist = &per_cpu(wakeup_latency_hist, cpu);
++ mp = &per_cpu(wakeup_maxlatproc, cpu);
++ break;
++ case WAKEUP_LATENCY_SHAREDPRIO:
++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ hist = &per_cpu(missed_timer_offsets, cpu);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++ break;
++#endif
++ }
++
++ hist_reset(hist);
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ if (latency_type == WAKEUP_LATENCY ||
++ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++ latency_type == MISSED_TIMER_OFFSETS ||
++ latency_type == TIMERANDWAKEUP_LATENCY)
++ clear_maxlatprocdata(mp);
++#endif
++ }
++
++ return size;
++}
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ int r;
++ unsigned long *this_pid = file->private_data;
++
++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t do_pid(struct file *file, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ unsigned long pid;
++ unsigned long *this_pid = file->private_data;
++
++ if (cnt >= sizeof(buf))
++ return -EINVAL;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = '\0';
++
++ if (kstrtoul(buf, 10, &pid))
++ return -EINVAL;
++
++ *this_pid = pid;
++
++ return cnt;
++}
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ int r;
++ struct maxlatproc_data *mp = file->private_data;
++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
++ unsigned long long t;
++ unsigned long usecs, secs;
++ char *buf;
++
++ if (mp->pid == -1 || mp->current_pid == -1) {
++ buf = "(none)\n";
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf,
++ strlen(buf));
++ }
++
++ buf = kmalloc(strmaxlen, GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
++
++ t = ns2usecs(mp->timestamp);
++ usecs = do_div(t, USEC_PER_SEC);
++ secs = (unsigned long) t;
++ r = snprintf(buf, strmaxlen,
++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
++ secs, usecs);
++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++ kfree(buf);
++ return r;
++}
++#endif
++
++static ssize_t
++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ struct enable_data *ed = file->private_data;
++ int r;
++
++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t
++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ long enable;
++ struct enable_data *ed = file->private_data;
++
++ if (cnt >= sizeof(buf))
++ return -EINVAL;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = 0;
++
++ if (kstrtoul(buf, 10, &enable))
++ return -EINVAL;
++
++ if ((enable && ed->enabled) || (!enable && !ed->enabled))
++ return cnt;
++
++ if (enable) {
++ int ret;
++
++ switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ ret = register_trace_preemptirqsoff_hist(
++ probe_preemptirqsoff_hist, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_preemptirqsoff_hist "
++ "to trace_preemptirqsoff_hist\n");
++ return ret;
++ }
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ ret = register_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_start "
++ "to trace_sched_wakeup\n");
++ return ret;
++ }
++ ret = register_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_start "
++ "to trace_sched_wakeup_new\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ return ret;
++ }
++ ret = register_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_stop "
++ "to trace_sched_switch\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ return ret;
++ }
++ ret = register_trace_sched_migrate_task(
++ probe_sched_migrate_task, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_sched_migrate_task "
++ "to trace_sched_migrate_task\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ return ret;
++ }
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ ret = register_trace_hrtimer_interrupt(
++ probe_hrtimer_interrupt, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_hrtimer_interrupt "
++ "to trace_hrtimer_interrupt\n");
++ return ret;
++ }
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ if (!wakeup_latency_enabled_data.enabled ||
++ !missed_timer_offsets_enabled_data.enabled)
++ return -EINVAL;
++ break;
++#endif
++ default:
++ break;
++ }
++ } else {
++ switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ {
++ int cpu;
++
++ unregister_trace_preemptirqsoff_hist(
++ probe_preemptirqsoff_hist, NULL);
++ for_each_online_cpu(cpu) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ per_cpu(hist_irqsoff_counting,
++ cpu) = 0;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ per_cpu(hist_preemptoff_counting,
++ cpu) = 0;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ per_cpu(hist_preemptirqsoff_counting,
++ cpu) = 0;
++#endif
++ }
++ }
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ {
++ int cpu;
++
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ unregister_trace_sched_migrate_task(
++ probe_sched_migrate_task, NULL);
++
++ for_each_online_cpu(cpu) {
++ per_cpu(wakeup_task, cpu) = NULL;
++ per_cpu(wakeup_sharedprio, cpu) = 0;
++ }
++ }
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ timerandwakeup_enabled_data.enabled = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ unregister_trace_hrtimer_interrupt(
++ probe_hrtimer_interrupt, NULL);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ timerandwakeup_enabled_data.enabled = 0;
++#endif
++ break;
++#endif
++ default:
++ break;
++ }
++ }
++ ed->enabled = enable;
++ return cnt;
++}
++
++static const struct file_operations latency_hist_reset_fops = {
++ .open = tracing_open_generic,
++ .write = latency_hist_reset,
++};
++
++static const struct file_operations enable_fops = {
++ .open = tracing_open_generic,
++ .read = show_enable,
++ .write = do_enable,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static const struct file_operations pid_fops = {
++ .open = tracing_open_generic,
++ .read = show_pid,
++ .write = do_pid,
++};
++
++static const struct file_operations maxlatproc_fops = {
++ .open = tracing_open_generic,
++ .read = show_maxlatproc,
++};
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason,
++ int starthist)
++{
++ int cpu = raw_smp_processor_id();
++ int time_set = 0;
++
++ if (starthist) {
++ cycle_t uninitialized_var(start);
++
++ if (!preempt_count() && !irqs_disabled())
++ return;
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ if ((reason == IRQS_OFF || reason == TRACE_START) &&
++ !per_cpu(hist_irqsoff_counting, cpu)) {
++ per_cpu(hist_irqsoff_counting, cpu) = 1;
++ start = ftrace_now(cpu);
++ time_set++;
++ per_cpu(hist_irqsoff_start, cpu) = start;
++ }
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
++ !per_cpu(hist_preemptoff_counting, cpu)) {
++ per_cpu(hist_preemptoff_counting, cpu) = 1;
++ if (!(time_set++))
++ start = ftrace_now(cpu);
++ per_cpu(hist_preemptoff_start, cpu) = start;
++ }
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ if (per_cpu(hist_irqsoff_counting, cpu) &&
++ per_cpu(hist_preemptoff_counting, cpu) &&
++ !per_cpu(hist_preemptirqsoff_counting, cpu)) {
++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
++ if (!time_set)
++ start = ftrace_now(cpu);
++ per_cpu(hist_preemptirqsoff_start, cpu) = start;
++ }
++#endif
++ } else {
++ cycle_t uninitialized_var(stop);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
++ per_cpu(hist_irqsoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
++
++ stop = ftrace_now(cpu);
++ time_set++;
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
++ stop, NULL);
++ }
++ per_cpu(hist_irqsoff_counting, cpu) = 0;
++ }
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
++ per_cpu(hist_preemptoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
++
++ if (!(time_set++))
++ stop = ftrace_now(cpu);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
++ 0, stop, NULL);
++ }
++ per_cpu(hist_preemptoff_counting, cpu) = 0;
++ }
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
++ !per_cpu(hist_preemptoff_counting, cpu)) &&
++ per_cpu(hist_preemptirqsoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
++
++ if (!time_set)
++ stop = ftrace_now(cpu);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
++ latency, 0, stop, NULL);
++ }
++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
++ }
++#endif
++ }
++}
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_RAW_SPINLOCK(wakeup_lock);
++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
++ int cpu)
++{
++ int old_cpu = task_cpu(task);
++
++ if (cpu != old_cpu) {
++ unsigned long flags;
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
++ if (task == cpu_wakeup_task) {
++ put_task_struct(cpu_wakeup_task);
++ per_cpu(wakeup_task, old_cpu) = NULL;
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
++ get_task_struct(cpu_wakeup_task);
++ }
++
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++ }
++}
++
++static notrace void probe_wakeup_latency_hist_start(void *v,
++ struct task_struct *p, int success)
++{
++ unsigned long flags;
++ struct task_struct *curr = current;
++ int cpu = task_cpu(p);
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++ if (wakeup_pid) {
++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++ p->prio == curr->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++ if (likely(wakeup_pid != task_pid_nr(p)))
++ goto out;
++ } else {
++ if (likely(!rt_task(p)) ||
++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
++ p->prio > curr->prio)
++ goto out;
++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++ p->prio == curr->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++ }
++
++ if (cpu_wakeup_task)
++ put_task_struct(cpu_wakeup_task);
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
++ get_task_struct(cpu_wakeup_task);
++ cpu_wakeup_task->preempt_timestamp_hist =
++ ftrace_now(raw_smp_processor_id());
++out:
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++ struct task_struct *prev, struct task_struct *next)
++{
++ unsigned long flags;
++ int cpu = task_cpu(next);
++ long latency;
++ cycle_t stop;
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++ if (cpu_wakeup_task == NULL)
++ goto out;
++
++ /* Already running? */
++ if (unlikely(current == cpu_wakeup_task))
++ goto out_reset;
++
++ if (next != cpu_wakeup_task) {
++ if (next->prio < cpu_wakeup_task->prio)
++ goto out_reset;
++
++ if (next->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
++ goto out;
++ }
++
++ if (current->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
++ /*
++ * The task we are waiting for is about to be switched to.
++ * Calculate latency and store it in histogram.
++ */
++ stop = ftrace_now(raw_smp_processor_id());
++
++ latency = ((long) (stop - next->preempt_timestamp_hist)) /
++ NSECS_PER_USECS;
++
++ if (per_cpu(wakeup_sharedprio, cpu)) {
++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
++ next);
++ per_cpu(wakeup_sharedprio, cpu) = 0;
++ } else {
++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ if (timerandwakeup_enabled_data.enabled) {
++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
++ next->timer_offset + latency, next->timer_offset,
++ stop, next);
++ }
++#endif
++ }
++
++out_reset:
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ next->timer_offset = 0;
++#endif
++ put_task_struct(cpu_wakeup_task);
++ per_cpu(wakeup_task, cpu) = NULL;
++out:
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++ long long latency_ns, struct task_struct *curr,
++ struct task_struct *task)
++{
++ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
++ (task->prio < curr->prio ||
++ (task->prio == curr->prio &&
++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
++ long latency;
++ cycle_t now;
++
++ if (missed_timer_offsets_pid) {
++ if (likely(missed_timer_offsets_pid !=
++ task_pid_nr(task)))
++ return;
++ }
++
++ now = ftrace_now(cpu);
++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
++ task);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ task->timer_offset = latency;
++#endif
++ }
++}
++#endif
++
++static __init int latency_hist_init(void)
++{
++ struct dentry *latency_hist_root = NULL;
++ struct dentry *dentry;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ struct dentry *dentry_sharedprio;
++#endif
++ struct dentry *entry;
++ struct dentry *enable_root;
++ int i = 0;
++ struct hist_data *my_hist;
++ char name[64];
++ char *cpufmt = "CPU%d";
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ char *cpufmt_maxlatproc = "max_latency-CPU%d";
++ struct maxlatproc_data *mp = NULL;
++#endif
++
++ dentry = tracing_init_dentry();
++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
++ enable_root = debugfs_create_dir("enable", latency_hist_root);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(irqsoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ dentry = debugfs_create_dir(preemptoff_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(preemptoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(preemptirqsoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ entry = debugfs_create_file("preemptirqsoff", 0644,
++ enable_root, (void *)&preemptirqsoff_enabled_data,
++ &enable_fops);
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ dentry = debugfs_create_dir(wakeup_latency_hist_dir,
++ latency_hist_root);
++ dentry_sharedprio = debugfs_create_dir(
++ wakeup_latency_hist_dir_sharedprio, dentry);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(wakeup_latency_hist, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(wakeup_latency_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
++ &per_cpu(wakeup_latency_hist_sharedprio, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++
++ mp = &per_cpu(wakeup_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("pid", 0644, dentry,
++ (void *)&wakeup_pid, &pid_fops);
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
++ entry = debugfs_create_file("wakeup", 0644,
++ enable_root, (void *)&wakeup_latency_enabled_data,
++ &enable_fops);
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ dentry = debugfs_create_dir(missed_timer_offsets_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
++ my_hist = &per_cpu(missed_timer_offsets, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("pid", 0644, dentry,
++ (void *)&missed_timer_offsets_pid, &pid_fops);
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
++ entry = debugfs_create_file("missed_timer_offsets", 0644,
++ enable_root, (void *)&missed_timer_offsets_enabled_data,
++ &enable_fops);
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(timerandwakeup_latency_hist, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++ mp = &per_cpu(timerandwakeup_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
++ entry = debugfs_create_file("timerandwakeup", 0644,
++ enable_root, (void *)&timerandwakeup_enabled_data,
++ &enable_fops);
++#endif
++ return 0;
++}
++
++device_initcall(latency_hist_init);
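
Once this initcall has run, each enabled histogram is a plain text file under debugfs. A minimal userspace reader, sketched under the assumption that debugfs is mounted at /sys/kernel/debug, that the root directory registered earlier in this file is named "latency_hist", and that cpufmt expands to "CPU%d" (none of which is visible in this hunk):

	/* Hedged sketch: dump one wakeup-latency histogram. Only the
	 * "pid"/"reset" entries and the enable switches are visible in
	 * the hunk above; the path components are assumptions. */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/kernel/debug/latency_hist/wakeup/CPU0", "r");

		if (!f)
			return 1;
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);	/* one "latency count" pair per line */
		fclose(f);
		return 0;
	}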
+diff -Nur linux-3.18.8.orig/kernel/trace/Makefile linux-3.18.8/kernel/trace/Makefile
+--- linux-3.18.8.orig/kernel/trace/Makefile 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/Makefile 2015-03-03 08:05:18.000000000 +0100
+@@ -36,6 +36,10 @@
+ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
+ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
+ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
+ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+diff -Nur linux-3.18.8.orig/kernel/trace/trace.c linux-3.18.8/kernel/trace/trace.c
+--- linux-3.18.8.orig/kernel/trace/trace.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/trace.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1579,6 +1579,7 @@
+ struct task_struct *tsk = current;
+
+ entry->preempt_count = pc & 0xff;
++ entry->preempt_lazy_count = preempt_lazy_count();
+ entry->pid = (tsk) ? tsk->pid : 0;
+ entry->flags =
+ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+@@ -1588,8 +1589,11 @@
+ #endif
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
++ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
+ (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
++
++ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+@@ -2509,14 +2513,17 @@
+
+ static void print_lat_help_header(struct seq_file *m)
+ {
+- seq_puts(m, "# _------=> CPU# \n");
+- seq_puts(m, "# / _-----=> irqs-off \n");
+- seq_puts(m, "# | / _----=> need-resched \n");
+- seq_puts(m, "# || / _---=> hardirq/softirq \n");
+- seq_puts(m, "# ||| / _--=> preempt-depth \n");
+- seq_puts(m, "# |||| / delay \n");
+- seq_puts(m, "# cmd pid ||||| time | caller \n");
+- seq_puts(m, "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# _--------=> CPU# \n");
++ seq_puts(m, "# / _-------=> irqs-off \n");
++ seq_puts(m, "# | / _------=> need-resched \n");
++ seq_puts(m, "# || / _-----=> need-resched_lazy \n");
++ seq_puts(m, "# ||| / _----=> hardirq/softirq \n");
++ seq_puts(m, "# |||| / _---=> preempt-depth \n");
++ seq_puts(m, "# ||||| / _--=> preempt-lazy-depth\n");
++ seq_puts(m, "# |||||| / _-=> migrate-disable \n");
++ seq_puts(m, "# ||||||| / delay \n");
++ seq_puts(m, "# cmd pid |||||||| time | caller \n");
++ seq_puts(m, "# \\ / |||||||| \\ | / \n");
+ }
+
+ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
+@@ -2540,13 +2547,16 @@
+ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
+ {
+ print_event_info(buf, m);
+- seq_puts(m, "# _-----=> irqs-off\n");
+- seq_puts(m, "# / _----=> need-resched\n");
+- seq_puts(m, "# | / _---=> hardirq/softirq\n");
+- seq_puts(m, "# || / _--=> preempt-depth\n");
+- seq_puts(m, "# ||| / delay\n");
+- seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
+- seq_puts(m, "# | | | |||| | |\n");
++ seq_puts(m, "# _-------=> irqs-off \n");
++ seq_puts(m, "# / _------=> need-resched \n");
++ seq_puts(m, "# |/ _-----=> need-resched_lazy \n");
++ seq_puts(m, "# ||/ _----=> hardirq/softirq \n");
++ seq_puts(m, "# |||/ _---=> preempt-depth \n");
++ seq_puts(m, "# ||||/ _--=> preempt-lazy-depth\n");
++ seq_puts(m, "# ||||| / _-=> migrate-disable \n");
++ seq_puts(m, "# |||||| / delay\n");
++ seq_puts(m, "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n");
++ seq_puts(m, "# | | | |||||| | |\n");
+ }
+
+ void
+diff -Nur linux-3.18.8.orig/kernel/trace/trace_events.c linux-3.18.8/kernel/trace/trace_events.c
+--- linux-3.18.8.orig/kernel/trace/trace_events.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/trace_events.c 2015-03-03 08:05:18.000000000 +0100
+@@ -162,6 +162,8 @@
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
++ __common_field(unsigned short, migrate_disable);
++ __common_field(unsigned short, padding);
+
+ return ret;
+ }
+diff -Nur linux-3.18.8.orig/kernel/trace/trace.h linux-3.18.8/kernel/trace/trace.h
+--- linux-3.18.8.orig/kernel/trace/trace.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/trace.h 2015-03-03 08:05:18.000000000 +0100
+@@ -119,6 +119,7 @@
+ * NEED_RESCHED - reschedule is requested
+ * HARDIRQ - inside an interrupt handler
+ * SOFTIRQ - inside a softirq handler
++ * NEED_RESCHED_LAZY - lazy reschedule is requested
+ */
+ enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+@@ -127,6 +128,7 @@
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40,
+ };
+
+ #define TRACE_BUF_SIZE 1024
+diff -Nur linux-3.18.8.orig/kernel/trace/trace_irqsoff.c linux-3.18.8/kernel/trace/trace_irqsoff.c
+--- linux-3.18.8.orig/kernel/trace/trace_irqsoff.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/trace_irqsoff.c 2015-03-03 08:05:18.000000000 +0100
+@@ -17,6 +17,7 @@
+ #include <linux/fs.h>
+
+ #include "trace.h"
++#include <trace/events/hist.h>
+
+ static struct trace_array *irqsoff_trace __read_mostly;
+ static int tracer_enabled __read_mostly;
+@@ -435,11 +436,13 @@
+ {
+ if (preempt_trace() || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(TRACE_START, 1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
+
+ void stop_critical_timings(void)
+ {
++ trace_preemptirqsoff_hist(TRACE_STOP, 0);
+ if (preempt_trace() || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -449,6 +452,7 @@
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+@@ -457,6 +461,7 @@
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(a0, a1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -482,6 +487,7 @@
+ */
+ void trace_hardirqs_on(void)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -491,11 +497,13 @@
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
+
+ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -505,6 +513,7 @@
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, caller_addr);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+@@ -514,12 +523,14 @@
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
+ if (preempt_trace() && !irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(PREEMPT_ON, 1);
+ if (preempt_trace() && !irq_trace())
+ start_critical_timing(a0, a1);
+ }
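
The histogram hooks are plain tracepoint emissions, placed before the timing stops on every "on" path and after it starts on every "off" path, so the histogram window brackets the measured region. A consumer attaches like any other tracepoint user; a hedged sketch, assuming <trace/events/hist.h> declares preemptirqsoff_hist with the (reason, starthist) arguments the call sites pass:

	/* Hedged sketch of a consumer; the probe arguments mirror the
	 * (reason, starthist) values passed at the call sites above. */
	static void notrace probe_preemptirqsoff_hist(void *data, int reason,
						      int starthist)
	{
		/* starthist != 0: take a per-CPU timestamp; on the matching
		 * stop, compute the delta and bump the histogram bucket. */
	}

	static int __init hist_probe_init(void)
	{
		return register_trace_preemptirqsoff_hist(probe_preemptirqsoff_hist,
							  NULL);
	}
	device_initcall(hist_probe_init);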
+diff -Nur linux-3.18.8.orig/kernel/trace/trace_output.c linux-3.18.8/kernel/trace/trace_output.c
+--- linux-3.18.8.orig/kernel/trace/trace_output.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/trace/trace_output.c 2015-03-03 08:05:18.000000000 +0100
+@@ -410,6 +410,7 @@
+ {
+ char hardsoft_irq;
+ char need_resched;
++ char need_resched_lazy;
+ char irqs_off;
+ int hardirq;
+ int softirq;
+@@ -438,6 +439,8 @@
+ need_resched = '.';
+ break;
+ }
++ need_resched_lazy =
++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
+
+ hardsoft_irq =
+ (hardirq && softirq) ? 'H' :
+@@ -445,8 +448,9 @@
+ softirq ? 's' :
+ '.';
+
+- if (!trace_seq_printf(s, "%c%c%c",
+- irqs_off, need_resched, hardsoft_irq))
++ if (!trace_seq_printf(s, "%c%c%c%c",
++ irqs_off, need_resched, need_resched_lazy,
++ hardsoft_irq))
+ return 0;
+
+ if (entry->preempt_count)
+@@ -454,6 +458,16 @@
+ else
+ ret = trace_seq_putc(s, '.');
+
++ if (entry->preempt_lazy_count)
++ ret = trace_seq_printf(s, "%x", entry->preempt_lazy_count);
++ else
++ ret = trace_seq_putc(s, '.');
++
++ if (entry->migrate_disable)
++ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
++ else
++ ret = trace_seq_putc(s, '.');
++
+ return ret;
+ }
+
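
With the two extra counters and the lazy flag, the latency format now prints seven flag columns instead of four. An illustrative field, derived from the code above ('.' wherever a flag or counter is clear):

	d.L.211
	||||||`- migrate-disable depth (1)
	|||||`-- preempt-lazy depth    (1)
	||||`--- preempt depth         (2)
	|||`---- hardirq/softirq       ('.': neither)
	||`----- need-resched_lazy     ('L': lazy reschedule pending)
	|`------ need-resched          ('.': not set)
	`------- irqs-off              ('d': interrupts disabled)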
+diff -Nur linux-3.18.8.orig/kernel/user.c linux-3.18.8/kernel/user.c
+--- linux-3.18.8.orig/kernel/user.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/user.c 2015-03-03 08:05:18.000000000 +0100
+@@ -158,11 +158,11 @@
+ if (!up)
+ return;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ free_user(up, flags);
+ else
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ struct user_struct *alloc_uid(kuid_t uid)
+diff -Nur linux-3.18.8.orig/kernel/watchdog.c linux-3.18.8/kernel/watchdog.c
+--- linux-3.18.8.orig/kernel/watchdog.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/watchdog.c 2015-03-03 08:05:18.000000000 +0100
+@@ -248,6 +248,8 @@
+
+ #ifdef CONFIG_HARDLOCKUP_DETECTOR
+
++static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++
+ static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+@@ -281,13 +283,21 @@
+ /* only print hardlockups once */
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
++ /*
++ * If early-printk is enabled then make sure we do not
++ * lock up in printk() and kill console logging:
++ */
++ printk_kill();
+
+- if (hardlockup_panic)
++ if (hardlockup_panic) {
+ panic("Watchdog detected hard LOCKUP on cpu %d",
+ this_cpu);
+- else
++ } else {
++ raw_spin_lock(&watchdog_output_lock);
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
+ this_cpu);
++ raw_spin_unlock(&watchdog_output_lock);
++ }
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
+@@ -430,6 +440,7 @@
+ /* kick off the timer for the hardlockup detector */
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
++ hrtimer->irqsafe = 1;
+
+ /* Enable the perf event */
+ watchdog_nmi_enable(cpu);
+diff -Nur linux-3.18.8.orig/kernel/workqueue.c linux-3.18.8/kernel/workqueue.c
+--- linux-3.18.8.orig/kernel/workqueue.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/workqueue.c 2015-03-03 08:05:18.000000000 +0100
+@@ -48,6 +48,8 @@
+ #include <linux/nodemask.h>
+ #include <linux/moduleparam.h>
+ #include <linux/uaccess.h>
++#include <linux/locallock.h>
++#include <linux/delay.h>
+
+ #include "workqueue_internal.h"
+
+@@ -121,15 +123,20 @@
+ * cpu or grabbing pool->lock is enough for read access. If
+ * POOL_DISASSOCIATED is set, it's identical to L.
+ *
++ * On RT we need the extra protection via rt_lock_idle_list() for
++ * the list manipulations against read access from
++ * wq_worker_sleeping(). All other places are nicely serialized via
++ * pool->lock.
++ *
+ * A: pool->attach_mutex protected.
+ *
+ * PL: wq_pool_mutex protected.
+ *
+- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
++ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
+ *
+ * WQ: wq->mutex protected.
+ *
+- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
++ * WR: wq->mutex protected for writes. RCU protected for reads.
+ *
+ * MD: wq_mayday_lock protected.
+ */
+@@ -177,7 +184,7 @@
+ atomic_t nr_running ____cacheline_aligned_in_smp;
+
+ /*
+- * Destruction of pool is sched-RCU protected to allow dereferences
++ * Destruction of pool is RCU protected to allow dereferences
+ * from get_work_pool().
+ */
+ struct rcu_head rcu;
+@@ -206,7 +213,7 @@
+ /*
+ * Release of unbound pwq is punted to system_wq. See put_pwq()
+ * and pwq_unbound_release_workfn() for details. pool_workqueue
+- * itself is also sched-RCU protected so that the first pwq can be
++ * itself is also RCU protected so that the first pwq can be
+ * determined without grabbing wq->mutex.
+ */
+ struct work_struct unbound_release_work;
+@@ -321,6 +328,8 @@
+ struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
+ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+
++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
++
+ static int worker_thread(void *__worker);
+ static void copy_workqueue_attrs(struct workqueue_attrs *to,
+ const struct workqueue_attrs *from);
+@@ -329,14 +338,14 @@
+ #include <trace/events/workqueue.h>
+
+ #define assert_rcu_or_pool_mutex() \
+- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
++ rcu_lockdep_assert(rcu_read_lock_held() || \
+ lockdep_is_held(&wq_pool_mutex), \
+- "sched RCU or wq_pool_mutex should be held")
++ "RCU or wq_pool_mutex should be held")
+
+ #define assert_rcu_or_wq_mutex(wq) \
+- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
++ rcu_lockdep_assert(rcu_read_lock_held() || \
+ lockdep_is_held(&wq->mutex), \
+- "sched RCU or wq->mutex should be held")
++ "RCU or wq->mutex should be held")
+
+ #define for_each_cpu_worker_pool(pool, cpu) \
+ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
+@@ -348,7 +357,7 @@
+ * @pool: iteration cursor
+ * @pi: integer used for iteration
+ *
+- * This must be called either with wq_pool_mutex held or sched RCU read
++ * This must be called either with wq_pool_mutex held or RCU read
+ * locked. If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
+ *
+@@ -380,7 +389,7 @@
+ * @pwq: iteration cursor
+ * @wq: the target workqueue
+ *
+- * This must be called either with wq->mutex held or sched RCU read locked.
++ * This must be called either with wq->mutex held or RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+ *
+@@ -392,6 +401,31 @@
+ if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
+ else
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static inline void rt_lock_idle_list(struct worker_pool *pool)
++{
++ preempt_disable();
++}
++static inline void rt_unlock_idle_list(struct worker_pool *pool)
++{
++ preempt_enable();
++}
++static inline void sched_lock_idle_list(struct worker_pool *pool) { }
++static inline void sched_unlock_idle_list(struct worker_pool *pool) { }
++#else
++static inline void rt_lock_idle_list(struct worker_pool *pool) { }
++static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
++static inline void sched_lock_idle_list(struct worker_pool *pool)
++{
++ spin_lock_irq(&pool->lock);
++}
++static inline void sched_unlock_idle_list(struct worker_pool *pool)
++{
++ spin_unlock_irq(&pool->lock);
++}
++#endif
++
++
+ #ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+ static struct debug_obj_descr work_debug_descr;
+@@ -542,7 +576,7 @@
+ * @wq: the target workqueue
+ * @node: the node ID
+ *
+- * This must be called either with pwq_lock held or sched RCU read locked.
++ * This must be called either with pwq_lock held or RCU read locked.
+ * If the pwq needs to be used beyond the locking in effect, the caller is
+ * responsible for guaranteeing that the pwq stays online.
+ *
+@@ -646,8 +680,8 @@
+ * @work: the work item of interest
+ *
+ * Pools are created and destroyed under wq_pool_mutex, and allows read
+- * access under sched-RCU read lock. As such, this function should be
+- * called under wq_pool_mutex or with preemption disabled.
++ * access under RCU read lock. As such, this function should be
++ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
+ *
+ * All fields of the returned pool are accessible as long as the above
+ * mentioned locking is in effect. If the returned pool needs to be used
+@@ -784,51 +818,44 @@
+ */
+ static void wake_up_worker(struct worker_pool *pool)
+ {
+- struct worker *worker = first_idle_worker(pool);
++ struct worker *worker;
++
++ rt_lock_idle_list(pool);
++
++ worker = first_idle_worker(pool);
+
+ if (likely(worker))
+ wake_up_process(worker->task);
++
++ rt_unlock_idle_list(pool);
+ }
+
+ /**
+- * wq_worker_waking_up - a worker is waking up
+- * @task: task waking up
+- * @cpu: CPU @task is waking up to
+- *
+- * This function is called during try_to_wake_up() when a worker is
+- * being awoken.
++ * wq_worker_running - a worker is running again
++ * @task: task returning from sleep
+ *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
++ * This function is called when a worker returns from schedule()
+ */
+-void wq_worker_waking_up(struct task_struct *task, int cpu)
++void wq_worker_running(struct task_struct *task)
+ {
+ struct worker *worker = kthread_data(task);
+
+- if (!(worker->flags & WORKER_NOT_RUNNING)) {
+- WARN_ON_ONCE(worker->pool->cpu != cpu);
++ if (!worker->sleeping)
++ return;
++ if (!(worker->flags & WORKER_NOT_RUNNING))
+ atomic_inc(&worker->pool->nr_running);
+- }
++ worker->sleeping = 0;
+ }
+
+ /**
+ * wq_worker_sleeping - a worker is going to sleep
+ * @task: task going to sleep
+- * @cpu: CPU in question, must be the current CPU number
+- *
+- * This function is called during schedule() when a busy worker is
+- * going to sleep. Worker on the same cpu can be woken up by
+- * returning pointer to its task.
+- *
+- * CONTEXT:
+- * spin_lock_irq(rq->lock)
+- *
+- * Return:
+- * Worker task on @cpu to wake up, %NULL if none.
++ * This function is called from schedule() when a busy worker is
++ * going to sleep.
+ */
+-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
++void wq_worker_sleeping(struct task_struct *task)
+ {
+- struct worker *worker = kthread_data(task), *to_wakeup = NULL;
++ struct worker *worker = kthread_data(task);
+ struct worker_pool *pool;
+
+ /*
+@@ -837,29 +864,26 @@
+ * checking NOT_RUNNING.
+ */
+ if (worker->flags & WORKER_NOT_RUNNING)
+- return NULL;
++ return;
+
+ pool = worker->pool;
+
+- /* this can only happen on the local cpu */
+- if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
+- return NULL;
++ if (WARN_ON_ONCE(worker->sleeping))
++ return;
++
++ worker->sleeping = 1;
+
+ /*
+ * The counterpart of the following dec_and_test, implied mb,
+ * worklist not empty test sequence is in insert_work().
+ * Please read comment there.
+- *
+- * NOT_RUNNING is clear. This means that we're bound to and
+- * running on the local cpu w/ rq lock held and preemption
+- * disabled, which in turn means that none else could be
+- * manipulating idle_list, so dereferencing idle_list without pool
+- * lock is safe.
+ */
+ if (atomic_dec_and_test(&pool->nr_running) &&
+- !list_empty(&pool->worklist))
+- to_wakeup = first_idle_worker(pool);
+- return to_wakeup ? to_wakeup->task : NULL;
++ !list_empty(&pool->worklist)) {
++ sched_lock_idle_list(pool);
++ wake_up_worker(pool);
++ sched_unlock_idle_list(pool);
++ }
+ }
+
+ /**
+@@ -1053,12 +1077,12 @@
+ {
+ if (pwq) {
+ /*
+- * As both pwqs and pools are sched-RCU protected, the
++ * As both pwqs and pools are RCU protected, the
+ * following lock operations are safe.
+ */
+- spin_lock_irq(&pwq->pool->lock);
++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
+ put_pwq(pwq);
+- spin_unlock_irq(&pwq->pool->lock);
++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
+ }
+ }
+
+@@ -1160,7 +1184,7 @@
+ struct worker_pool *pool;
+ struct pool_workqueue *pwq;
+
+- local_irq_save(*flags);
++ local_lock_irqsave(pendingb_lock, *flags);
+
+ /* try to steal the timer if it exists */
+ if (is_dwork) {
+@@ -1179,6 +1203,7 @@
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
+ return 0;
+
++ rcu_read_lock();
+ /*
+ * The queueing is in progress, or it is already queued. Try to
+ * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
+@@ -1217,14 +1242,16 @@
+ set_work_pool_and_keep_pending(work, pool->id);
+
+ spin_unlock(&pool->lock);
++ rcu_read_unlock();
+ return 1;
+ }
+ spin_unlock(&pool->lock);
+ fail:
+- local_irq_restore(*flags);
++ rcu_read_unlock();
++ local_unlock_irqrestore(pendingb_lock, *flags);
+ if (work_is_canceling(work))
+ return -ENOENT;
+- cpu_relax();
++ cpu_chill();
+ return -EAGAIN;
+ }
+
+@@ -1293,7 +1320,7 @@
+ * queued or lose PENDING. Grabbing PENDING and queueing should
+ * happen with IRQ disabled.
+ */
+- WARN_ON_ONCE(!irqs_disabled());
++ WARN_ON_ONCE_NONRT(!irqs_disabled());
+
+ debug_work_activate(work);
+
+@@ -1301,6 +1328,8 @@
+ if (unlikely(wq->flags & __WQ_DRAINING) &&
+ WARN_ON_ONCE(!is_chained_work(wq)))
+ return;
++
++ rcu_read_lock();
+ retry:
+ if (req_cpu == WORK_CPU_UNBOUND)
+ cpu = raw_smp_processor_id();
+@@ -1357,10 +1386,8 @@
+ /* pwq determined, queue */
+ trace_workqueue_queue_work(req_cpu, pwq, work);
+
+- if (WARN_ON(!list_empty(&work->entry))) {
+- spin_unlock(&pwq->pool->lock);
+- return;
+- }
++ if (WARN_ON(!list_empty(&work->entry)))
++ goto out;
+
+ pwq->nr_in_flight[pwq->work_color]++;
+ work_flags = work_color_to_flags(pwq->work_color);
+@@ -1376,7 +1403,9 @@
+
+ insert_work(pwq, work, worklist, work_flags);
+
++out:
+ spin_unlock(&pwq->pool->lock);
++ rcu_read_unlock();
+ }
+
+ /**
+@@ -1396,14 +1425,14 @@
+ bool ret = false;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pendingb_lock, flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_work(cpu, wq, work);
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_work_on);
+@@ -1470,14 +1499,14 @@
+ unsigned long flags;
+
+ /* read the comment in __queue_work() */
+- local_irq_save(flags);
++ local_lock_irqsave(pendingb_lock, flags);
+
+ if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(queue_delayed_work_on);
+@@ -1512,7 +1541,7 @@
+
+ if (likely(ret >= 0)) {
+ __queue_delayed_work(cpu, wq, dwork, delay);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ }
+
+ /* -ENOENT from try_to_grab_pending() becomes %true */
+@@ -1545,7 +1574,9 @@
+ worker->last_active = jiffies;
+
+ /* idle_list is LIFO */
++ rt_lock_idle_list(pool);
+ list_add(&worker->entry, &pool->idle_list);
++ rt_unlock_idle_list(pool);
+
+ if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+ mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
+@@ -1578,7 +1609,9 @@
+ return;
+ worker_clr_flags(worker, WORKER_IDLE);
+ pool->nr_idle--;
++ rt_lock_idle_list(pool);
+ list_del_init(&worker->entry);
++ rt_unlock_idle_list(pool);
+ }
+
+ static struct worker *alloc_worker(int node)
+@@ -1746,7 +1779,9 @@
+ pool->nr_workers--;
+ pool->nr_idle--;
+
++ rt_lock_idle_list(pool);
+ list_del_init(&worker->entry);
++ rt_unlock_idle_list(pool);
+ worker->flags |= WORKER_DIE;
+ wake_up_process(worker->task);
+ }
+@@ -2641,14 +2676,14 @@
+
+ might_sleep();
+
+- local_irq_disable();
++ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (!pool) {
+- local_irq_enable();
++ rcu_read_unlock();
+ return false;
+ }
+
+- spin_lock(&pool->lock);
++ spin_lock_irq(&pool->lock);
+ /* see the comment in try_to_grab_pending() with the same code */
+ pwq = get_work_pwq(work);
+ if (pwq) {
+@@ -2675,10 +2710,11 @@
+ else
+ lock_map_acquire_read(&pwq->wq->lockdep_map);
+ lock_map_release(&pwq->wq->lockdep_map);
+-
++ rcu_read_unlock();
+ return true;
+ already_gone:
+ spin_unlock_irq(&pool->lock);
++ rcu_read_unlock();
+ return false;
+ }
+
+@@ -2727,7 +2763,7 @@
+
+ /* tell other tasks trying to grab @work to back off */
+ mark_work_canceling(work);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+
+ flush_work(work);
+ clear_work_data(work);
+@@ -2772,10 +2808,10 @@
+ */
+ bool flush_delayed_work(struct delayed_work *dwork)
+ {
+- local_irq_disable();
++ local_lock_irq(pendingb_lock);
+ if (del_timer_sync(&dwork->timer))
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+- local_irq_enable();
++ local_unlock_irq(pendingb_lock);
+ return flush_work(&dwork->work);
+ }
+ EXPORT_SYMBOL(flush_delayed_work);
+@@ -2810,7 +2846,7 @@
+
+ set_work_pool_and_clear_pending(&dwork->work,
+ get_work_pool_id(&dwork->work));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pendingb_lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL(cancel_delayed_work);
+@@ -2996,7 +3032,8 @@
+ const char *delim = "";
+ int node, written = 0;
+
+- rcu_read_lock_sched();
++ get_online_cpus();
++ rcu_read_lock();
+ for_each_node(node) {
+ written += scnprintf(buf + written, PAGE_SIZE - written,
+ "%s%d:%d", delim, node,
+@@ -3004,7 +3041,8 @@
+ delim = " ";
+ }
+ written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
++ put_online_cpus();
+
+ return written;
+ }
+@@ -3372,7 +3410,7 @@
+ * put_unbound_pool - put a worker_pool
+ * @pool: worker_pool to put
+ *
+- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
+ * safe manner. get_unbound_pool() calls this function on its failure path
+ * and this function should be able to release pools which went through,
+ * successfully or not, init_worker_pool().
+@@ -3426,8 +3464,8 @@
+ del_timer_sync(&pool->idle_timer);
+ del_timer_sync(&pool->mayday_timer);
+
+- /* sched-RCU protected to allow dereferences from get_work_pool() */
+- call_rcu_sched(&pool->rcu, rcu_free_pool);
++ /* RCU protected to allow dereferences from get_work_pool() */
++ call_rcu(&pool->rcu, rcu_free_pool);
+ }
+
+ /**
+@@ -3532,7 +3570,7 @@
+ put_unbound_pool(pool);
+ mutex_unlock(&wq_pool_mutex);
+
+- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
++ call_rcu(&pwq->rcu, rcu_free_pwq);
+
+ /*
+ * If we're the last pwq going away, @wq is already dead and no one
+@@ -4244,7 +4282,8 @@
+ struct pool_workqueue *pwq;
+ bool ret;
+
+- rcu_read_lock_sched();
++ rcu_read_lock();
++ preempt_disable();
+
+ if (cpu == WORK_CPU_UNBOUND)
+ cpu = smp_processor_id();
+@@ -4255,7 +4294,8 @@
+ pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+
+ ret = !list_empty(&pwq->delayed_works);
+- rcu_read_unlock_sched();
++ preempt_enable();
++ rcu_read_unlock();
+
+ return ret;
+ }
+@@ -4281,16 +4321,15 @@
+ if (work_pending(work))
+ ret |= WORK_BUSY_PENDING;
+
+- local_irq_save(flags);
++ rcu_read_lock();
+ pool = get_work_pool(work);
+ if (pool) {
+- spin_lock(&pool->lock);
++ spin_lock_irqsave(&pool->lock, flags);
+ if (find_worker_executing_work(pool, work))
+ ret |= WORK_BUSY_RUNNING;
+- spin_unlock(&pool->lock);
++ spin_unlock_irqrestore(&pool->lock, flags);
+ }
+- local_irq_restore(flags);
+-
++ rcu_read_unlock();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(work_busy);
+@@ -4719,16 +4758,16 @@
+ * nr_active is monotonically decreasing. It's safe
+ * to peek without lock.
+ */
+- rcu_read_lock_sched();
++ rcu_read_lock();
+ for_each_pwq(pwq, wq) {
+ WARN_ON_ONCE(pwq->nr_active < 0);
+ if (pwq->nr_active) {
+ busy = true;
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
+ goto out_unlock;
+ }
+ }
+- rcu_read_unlock_sched();
++ rcu_read_unlock();
+ }
+ out_unlock:
+ mutex_unlock(&wq_pool_mutex);
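
wq_worker_waking_up() ran under rq->lock inside try_to_wake_up() and wq_worker_sleeping() had to hand a task pointer back out of schedule(); the replacements are self-contained, taking the pool locks themselves, so the scheduler can call them from preemptible context. A hedged sketch of the caller side this implies; the real sched/core.c hunks live elsewhere in this patch and may differ:

	/* Assumed call sites; names and placement are illustrative. */
	static inline void sched_submit_work(struct task_struct *tsk)
	{
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_sleeping(tsk);	/* no rq->lock held */
	}

	static inline void sched_update_worker(struct task_struct *tsk)
	{
		if (tsk->flags & PF_WQ_WORKER)
			wq_worker_running(tsk);		/* after __schedule() returns */
	}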
+diff -Nur linux-3.18.8.orig/kernel/workqueue_internal.h linux-3.18.8/kernel/workqueue_internal.h
+--- linux-3.18.8.orig/kernel/workqueue_internal.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/kernel/workqueue_internal.h 2015-03-03 08:05:18.000000000 +0100
+@@ -43,6 +43,7 @@
+ unsigned long last_active; /* L: last active timestamp */
+ unsigned int flags; /* X: flags */
+ int id; /* I: worker id */
++ int sleeping; /* None */
+
+ /*
+ * Opaque string set with work_set_desc(). Printed out with task
+@@ -68,7 +69,7 @@
+ * Scheduler hooks for concurrency managed workqueue. Only to be used from
+ * sched/core.c and workqueue.c.
+ */
+-void wq_worker_waking_up(struct task_struct *task, int cpu);
+-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
++void wq_worker_running(struct task_struct *task);
++void wq_worker_sleeping(struct task_struct *task);
+
+ #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
+diff -Nur linux-3.18.8.orig/lib/debugobjects.c linux-3.18.8/lib/debugobjects.c
+--- linux-3.18.8.orig/lib/debugobjects.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/debugobjects.c 2015-03-03 08:05:18.000000000 +0100
+@@ -309,7 +309,10 @@
+ struct debug_obj *obj;
+ unsigned long flags;
+
+- fill_pool();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (preempt_count() == 0 && !irqs_disabled())
++#endif
++ fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+diff -Nur linux-3.18.8.orig/lib/idr.c linux-3.18.8/lib/idr.c
+--- linux-3.18.8.orig/lib/idr.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/idr.c 2015-03-03 08:05:18.000000000 +0100
+@@ -31,6 +31,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/percpu.h>
+ #include <linux/hardirq.h>
++#include <linux/locallock.h>
+
+ #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
+ #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
+@@ -367,6 +368,35 @@
+ idr_mark_full(pa, id);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
++
++static inline void idr_preload_lock(void)
++{
++ local_lock(idr_lock);
++}
++
++static inline void idr_preload_unlock(void)
++{
++ local_unlock(idr_lock);
++}
++
++void idr_preload_end(void)
++{
++ idr_preload_unlock();
++}
++EXPORT_SYMBOL(idr_preload_end);
++#else
++static inline void idr_preload_lock(void)
++{
++ preempt_disable();
++}
++
++static inline void idr_preload_unlock(void)
++{
++ preempt_enable();
++}
++#endif
+
+ /**
+ * idr_preload - preload for idr_alloc()
+@@ -402,7 +432,7 @@
+ WARN_ON_ONCE(in_interrupt());
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+- preempt_disable();
++ idr_preload_lock();
+
+ /*
+ * idr_alloc() is likely to succeed w/o full idr_layer buffer and
+@@ -414,9 +444,9 @@
+ while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
+ struct idr_layer *new;
+
+- preempt_enable();
++ idr_preload_unlock();
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+- preempt_disable();
++ idr_preload_lock();
+ if (!new)
+ break;
+
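
Callers are unaffected; idr_preload()/idr_preload_end() simply pin a local lock instead of preemption on RT, which keeps the preloaded nodes CPU-local while the section stays preemptible. The usual pattern:

	/* Standard idr_preload pattern, unchanged by this hunk. */
	static int assign_id(struct idr *idr, spinlock_t *lock, void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock(lock);
		id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
		spin_unlock(lock);
		idr_preload_end();
		return id;
	}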
+diff -Nur linux-3.18.8.orig/lib/Kconfig linux-3.18.8/lib/Kconfig
+--- linux-3.18.8.orig/lib/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -383,6 +383,7 @@
+
+ config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
++ depends on !PREEMPT_RT_FULL
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
+diff -Nur linux-3.18.8.orig/lib/Kconfig.debug linux-3.18.8/lib/Kconfig.debug
+--- linux-3.18.8.orig/lib/Kconfig.debug 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/Kconfig.debug 2015-03-03 08:05:18.000000000 +0100
+@@ -639,7 +639,7 @@
+
+ config DEBUG_SHIRQ
+ bool "Debug shared IRQ handlers"
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && !PREEMPT_RT_BASE
+ help
+ Enable this to generate a spurious interrupt as soon as a shared
+ interrupt handler is registered, and just before one is deregistered.
+diff -Nur linux-3.18.8.orig/lib/locking-selftest.c linux-3.18.8/lib/locking-selftest.c
+--- linux-3.18.8.orig/lib/locking-selftest.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/locking-selftest.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1858,6 +1858,7 @@
+
+ printk(" --------------------------------------------------------------------------\n");
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * irq-context testcases:
+ */
+@@ -1870,6 +1871,28 @@
+
+ DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
+ // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
++#else
++ /* On -rt, we only do hardirq context test for raw spinlock */
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
++#endif
+
+ ww_tests();
+
+diff -Nur linux-3.18.8.orig/lib/percpu_ida.c linux-3.18.8/lib/percpu_ida.c
+--- linux-3.18.8.orig/lib/percpu_ida.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/percpu_ida.c 2015-03-03 08:05:18.000000000 +0100
+@@ -29,6 +29,9 @@
+ #include <linux/string.h>
+ #include <linux/spinlock.h>
+ #include <linux/percpu_ida.h>
++#include <linux/locallock.h>
++
++static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock);
+
+ struct percpu_ida_cpu {
+ /*
+@@ -151,13 +154,13 @@
+ unsigned long flags;
+ int tag;
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+
+ /* Fastpath */
+ tag = alloc_local_tag(tags);
+ if (likely(tag >= 0)) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+ return tag;
+ }
+
+@@ -176,6 +179,7 @@
+
+ if (!tags->nr_free)
+ alloc_global_tags(pool, tags);
++
+ if (!tags->nr_free)
+ steal_tags(pool, tags);
+
+@@ -187,7 +191,7 @@
+ }
+
+ spin_unlock(&pool->lock);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+
+ if (tag >= 0 || state == TASK_RUNNING)
+ break;
+@@ -199,7 +203,7 @@
+
+ schedule();
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+ }
+ if (state != TASK_RUNNING)
+@@ -224,7 +228,7 @@
+
+ BUG_ON(tag >= pool->nr_tags);
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ tags = this_cpu_ptr(pool->tag_cpu);
+
+ spin_lock(&tags->lock);
+@@ -256,7 +260,7 @@
+ spin_unlock(&pool->lock);
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(percpu_ida_free);
+
+@@ -348,7 +352,7 @@
+ struct percpu_ida_cpu *remote;
+ unsigned cpu, i, err = 0;
+
+- local_irq_save(flags);
++ local_lock_irqsave(irq_off_lock, flags);
+ for_each_possible_cpu(cpu) {
+ remote = per_cpu_ptr(pool->tag_cpu, cpu);
+ spin_lock(&remote->lock);
+@@ -370,7 +374,7 @@
+ }
+ spin_unlock(&pool->lock);
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(irq_off_lock, flags);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
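
percpu_ida shows the recurring conversion in its simplest form: each local_irq_save/restore pair guarding per-CPU state becomes a named local lock. A hedged sketch of the <linux/locallock.h> fallback this relies on (the header itself is added elsewhere in this patch); on RT the lock variable is a per-CPU sleeping spinlock and interrupts stay enabled:

	/* Assumed !RT mapping; on PREEMPT_RT_BASE the section stays
	 * preemptible but remains CPU-local and serialized. */
	#ifndef CONFIG_PREEMPT_RT_BASE
	# define local_lock_irqsave(lvar, flags)	local_irq_save(flags)
	# define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
	#endif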
+diff -Nur linux-3.18.8.orig/lib/radix-tree.c linux-3.18.8/lib/radix-tree.c
+--- linux-3.18.8.orig/lib/radix-tree.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/radix-tree.c 2015-03-03 08:05:18.000000000 +0100
+@@ -195,12 +195,13 @@
+ * succeed in getting a node here (and never reach
+ * kmem_cache_alloc)
+ */
+- rtp = this_cpu_ptr(&radix_tree_preloads);
++ rtp = &get_cpu_var(radix_tree_preloads);
+ if (rtp->nr) {
+ ret = rtp->nodes[rtp->nr - 1];
+ rtp->nodes[rtp->nr - 1] = NULL;
+ rtp->nr--;
+ }
++ put_cpu_var(radix_tree_preloads);
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+@@ -240,6 +241,7 @@
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+@@ -305,6 +307,7 @@
+ return 0;
+ }
+ EXPORT_SYMBOL(radix_tree_maybe_preload);
++#endif
+
+ /*
+ * Return the maximum key which can be store into a
+diff -Nur linux-3.18.8.orig/lib/scatterlist.c linux-3.18.8/lib/scatterlist.c
+--- linux-3.18.8.orig/lib/scatterlist.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/scatterlist.c 2015-03-03 08:05:18.000000000 +0100
+@@ -592,7 +592,7 @@
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+- WARN_ON_ONCE(preemptible());
++ WARN_ON_ONCE(!pagefault_disabled());
+ kunmap_atomic(miter->addr);
+ } else
+ kunmap(miter->page);
+@@ -637,7 +637,7 @@
+ if (!sg_miter_skip(&miter, skip))
+ return false;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+@@ -654,7 +654,7 @@
+
+ sg_miter_stop(&miter);
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return offset;
+ }
+
+diff -Nur linux-3.18.8.orig/lib/smp_processor_id.c linux-3.18.8/lib/smp_processor_id.c
+--- linux-3.18.8.orig/lib/smp_processor_id.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/lib/smp_processor_id.c 2015-03-03 08:05:18.000000000 +0100
+@@ -39,8 +39,9 @@
+ if (!printk_ratelimit())
+ goto out_enable;
+
+- printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
+- what1, what2, preempt_count() - 1, current->comm, current->pid);
++ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n",
++ what1, what2, preempt_count() - 1, __migrate_disabled(current),
++ current->comm, current->pid);
+
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+diff -Nur linux-3.18.8.orig/mm/filemap.c linux-3.18.8/mm/filemap.c
+--- linux-3.18.8.orig/mm/filemap.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/filemap.c 2015-03-03 08:05:18.000000000 +0100
+@@ -168,7 +168,9 @@
+ if (!workingset_node_pages(node) &&
+ list_empty(&node->private_list)) {
+ node->private_data = mapping;
+- list_lru_add(&workingset_shadow_nodes, &node->private_list);
++ local_lock(workingset_shadow_lock);
++ list_lru_add(&__workingset_shadow_nodes, &node->private_list);
++ local_unlock(workingset_shadow_lock);
+ }
+ }
+
+@@ -535,9 +537,12 @@
+ * node->private_list is protected by
+ * mapping->tree_lock.
+ */
+- if (!list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes,
++ if (!list_empty(&node->private_list)) {
++ local_lock(workingset_shadow_lock);
++ list_lru_del(&__workingset_shadow_nodes,
+ &node->private_list);
++ local_unlock(workingset_shadow_lock);
++ }
+ }
+ return 0;
+ }
+diff -Nur linux-3.18.8.orig/mm/highmem.c linux-3.18.8/mm/highmem.c
+--- linux-3.18.8.orig/mm/highmem.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/highmem.c 2015-03-03 08:05:18.000000000 +0100
+@@ -29,10 +29,11 @@
+ #include <linux/kgdb.h>
+ #include <asm/tlbflush.h>
+
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+ #endif
++#endif
+
+ /*
+ * Virtual_count is not a pure "count".
+@@ -107,8 +108,9 @@
+ unsigned long totalhigh_pages __read_mostly;
+ EXPORT_SYMBOL(totalhigh_pages);
+
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
++#endif
+
+ unsigned int nr_free_highpages (void)
+ {
+diff -Nur linux-3.18.8.orig/mm/Kconfig linux-3.18.8/mm/Kconfig
+--- linux-3.18.8.orig/mm/Kconfig 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/Kconfig 2015-03-03 08:05:18.000000000 +0100
+@@ -408,7 +408,7 @@
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL
+ select COMPACTION
+ help
+ Transparent Hugepages allows the kernel to use huge pages and
+diff -Nur linux-3.18.8.orig/mm/memcontrol.c linux-3.18.8/mm/memcontrol.c
+--- linux-3.18.8.orig/mm/memcontrol.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/memcontrol.c 2015-03-03 08:05:18.000000000 +0100
+@@ -60,6 +60,8 @@
+ #include <net/sock.h>
+ #include <net/ip.h>
+ #include <net/tcp_memcontrol.h>
++#include <linux/locallock.h>
++
+ #include "slab.h"
+
+ #include <asm/uaccess.h>
+@@ -87,6 +89,7 @@
+ #define do_swap_account 0
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(event_lock);
+
+ static const char * const mem_cgroup_stat_names[] = {
+ "cache",
+@@ -2376,14 +2379,17 @@
+ */
+ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ {
+- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
++ struct memcg_stock_pcp *stock;
++ int cpu = get_cpu_light();
++
++ stock = &per_cpu(memcg_stock, cpu);
+
+ if (stock->cached != memcg) { /* reset if necessary */
+ drain_stock(stock);
+ stock->cached = memcg;
+ }
+ stock->nr_pages += nr_pages;
+- put_cpu_var(memcg_stock);
++ put_cpu_light();
+ }
+
+ /*
+@@ -2397,7 +2403,7 @@
+
+ /* Notify other cpus that system-wide "drain" is running */
+ get_online_cpus();
+- curcpu = get_cpu();
++ curcpu = get_cpu_light();
+ for_each_online_cpu(cpu) {
+ struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+ struct mem_cgroup *memcg;
+@@ -2414,7 +2420,7 @@
+ schedule_work_on(cpu, &stock->work);
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+
+ if (!sync)
+ goto out;
+@@ -3419,12 +3425,12 @@
+ move_unlock_mem_cgroup(from, &flags);
+ ret = 0;
+
+- local_irq_disable();
++ local_lock_irq(event_lock);
+ mem_cgroup_charge_statistics(to, page, nr_pages);
+ memcg_check_events(to, page);
+ mem_cgroup_charge_statistics(from, page, -nr_pages);
+ memcg_check_events(from, page);
+- local_irq_enable();
++ local_unlock_irq(event_lock);
+ out_unlock:
+ unlock_page(page);
+ out:
+@@ -6406,10 +6412,10 @@
+ VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+ }
+
+- local_irq_disable();
++ local_lock_irq(event_lock);
+ mem_cgroup_charge_statistics(memcg, page, nr_pages);
+ memcg_check_events(memcg, page);
+- local_irq_enable();
++ local_unlock_irq(event_lock);
+
+ if (do_swap_account && PageSwapCache(page)) {
+ swp_entry_t entry = { .val = page_private(page) };
+@@ -6468,14 +6474,14 @@
+ memcg_oom_recover(memcg);
+ }
+
+- local_irq_save(flags);
++ local_lock_irqsave(event_lock, flags);
+ __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
+ __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
+ __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
+ __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
+ __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file);
+ memcg_check_events(memcg, dummy_page);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(event_lock, flags);
+ }
+
+ static void uncharge_list(struct list_head *page_list)
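
get_cpu_light()/put_cpu_light() are the RT replacements for get_cpu()/put_cpu(): they pin the task to its CPU with migrate_disable() instead of disabling preemption, so the stock-draining paths may block while smp_processor_id() stays stable. Roughly the mapping assumed here (the real definitions live in the linux/smp.h hunk elsewhere in this patch):

	/* Hedged sketch of the assumed mapping. */
	#ifndef CONFIG_PREEMPT_RT_FULL
	# define get_cpu_light()	get_cpu()
	# define put_cpu_light()	put_cpu()
	#else
	# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
	# define put_cpu_light()	migrate_enable()
	#endif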
+diff -Nur linux-3.18.8.orig/mm/memory.c linux-3.18.8/mm/memory.c
+--- linux-3.18.8.orig/mm/memory.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/memory.c 2015-03-03 08:05:18.000000000 +0100
+@@ -3258,6 +3258,32 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++void pagefault_disable(void)
++{
++ migrate_disable();
++ current->pagefault_disabled++;
++ /*
++ * make sure to have issued the store before a pagefault
++ * can hit.
++ */
++ barrier();
++}
++EXPORT_SYMBOL(pagefault_disable);
++
++void pagefault_enable(void)
++{
++ /*
++ * make sure to issue those last loads/stores before enabling
++ * the pagefault handler again.
++ */
++ barrier();
++ current->pagefault_disabled--;
++ migrate_enable();
++}
++EXPORT_SYMBOL(pagefault_enable);
++#endif
++
+ /*
+ * By the time we get here, we already hold the mm semaphore
+ *
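
These RT variants replace the preempt_disable()-based ones: the task is pinned with migrate_disable() and a per-task counter tells the fault handler to fail fast rather than sleep. The pairing they guard is the usual atomic-copy window:

	/* Classic caller pattern; a fault inside the window returns an
	 * error instead of faulting the page in. */
	static size_t copy_nofault(void *dst, const void __user *src,
				   size_t size)
	{
		size_t ret;

		pagefault_disable();
		ret = __copy_from_user_inatomic(dst, src, size);
		pagefault_enable();
		return ret;
	}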
+diff -Nur linux-3.18.8.orig/mm/mmu_context.c linux-3.18.8/mm/mmu_context.c
+--- linux-3.18.8.orig/mm/mmu_context.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/mmu_context.c 2015-03-03 08:05:18.000000000 +0100
+@@ -23,6 +23,7 @@
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
++ preempt_disable_rt();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+@@ -30,6 +31,7 @@
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
++ preempt_enable_rt();
+ task_unlock(tsk);
+ #ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+diff -Nur linux-3.18.8.orig/mm/page_alloc.c linux-3.18.8/mm/page_alloc.c
+--- linux-3.18.8.orig/mm/page_alloc.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/page_alloc.c 2015-03-03 08:05:18.000000000 +0100
+@@ -59,6 +59,7 @@
+ #include <linux/page-debug-flags.h>
+ #include <linux/hugetlb.h>
+ #include <linux/sched/rt.h>
++#include <linux/locallock.h>
+
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -230,6 +231,18 @@
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags) \
++ local_lock_irqsave_on(pa_lock, flags, cpu)
++# define cpu_unlock_irqrestore(cpu, flags) \
++ local_unlock_irqrestore_on(pa_lock, flags, cpu)
++#else
++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
++#endif
++
+ int page_group_by_mobility_disabled __read_mostly;
+
+ void set_pageblock_migratetype(struct page *page, int migratetype)
+@@ -654,7 +667,7 @@
+ }
+
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -665,18 +678,51 @@
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *list)
+ {
+- int migratetype = 0;
+- int batch_free = 0;
+ int to_free = count;
+ unsigned long nr_scanned;
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
+
+- spin_lock(&zone->lock);
+ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+ if (nr_scanned)
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+
++ while (!list_empty(list)) {
++ struct page *page = list_first_entry(list, struct page, lru);
++ int mt; /* migratetype of the to-be-freed page */
++
++ /* must delete as __free_one_page list manipulates */
++ list_del(&page->lru);
++
++ mt = get_freepage_migratetype(page);
++ if (unlikely(has_isolate_pageblock(zone)))
++ mt = get_pageblock_migratetype(page);
++
++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
++ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
++ trace_mm_page_pcpu_drain(page, 0, mt);
++ to_free--;
++ }
++ WARN_ON(to_free != 0);
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++
++/*
++ * Moves a number of pages from the PCP lists to free list which
++ * is freed outside of the locked region.
++ *
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ */
++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++ struct list_head *dst)
++{
++ int migratetype = 0;
++ int batch_free = 0;
++
+ while (to_free) {
+ struct page *page;
+ struct list_head *list;
+@@ -692,7 +738,7 @@
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+- list = &pcp->lists[migratetype];
++ list = &src->lists[migratetype];
+ } while (list_empty(list));
+
+ /* This is the only non-empty list. Free them all. */
+@@ -700,21 +746,11 @@
+ batch_free = to_free;
+
+ do {
+- int mt; /* migratetype of the to-be-freed page */
+-
+- page = list_entry(list->prev, struct page, lru);
+- /* must delete as __free_one_page list manipulates */
++ page = list_last_entry(list, struct page, lru);
+ list_del(&page->lru);
+- mt = get_freepage_migratetype(page);
+- if (unlikely(has_isolate_pageblock(zone)))
+- mt = get_pageblock_migratetype(page);
+-
+- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+- trace_mm_page_pcpu_drain(page, 0, mt);
++ list_add(&page->lru, dst);
+ } while (--to_free && --batch_free && !list_empty(list));
+ }
+- spin_unlock(&zone->lock);
+ }
+
+ static void free_one_page(struct zone *zone,
+@@ -723,7 +759,9 @@
+ int migratetype)
+ {
+ unsigned long nr_scanned;
+- spin_lock(&zone->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
+ nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+ if (nr_scanned)
+ __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
+@@ -733,7 +771,7 @@
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ }
+ __free_one_page(page, pfn, zone, order, migratetype);
+- spin_unlock(&zone->lock);
++ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ static bool free_pages_prepare(struct page *page, unsigned int order)
+@@ -773,11 +811,11 @@
+ return;
+
+ migratetype = get_pfnblock_migratetype(page, pfn);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ __count_vm_events(PGFREE, 1 << order);
+ set_freepage_migratetype(page, migratetype);
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ void __init __free_pages_bootmem(struct page *page, unsigned int order)
+@@ -1253,16 +1291,18 @@
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
++ LIST_HEAD(dst);
+ int to_drain, batch;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ batch = ACCESS_ONCE(pcp->batch);
+ to_drain = min(pcp->count, batch);
+ if (to_drain > 0) {
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
+ pcp->count -= to_drain;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+
+@@ -1281,16 +1321,21 @@
+ for_each_populated_zone(zone) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
+
+- local_irq_save(flags);
++ cpu_lock_irqsave(cpu, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+- if (pcp->count) {
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count) {
++ isolate_pcp_pages(count, pcp, &dst);
+ pcp->count = 0;
+ }
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+ }
+
+@@ -1343,7 +1388,12 @@
+ else
+ cpumask_clear_cpu(cpu, &cpus_with_pcps);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
++#else
++ for_each_cpu(cpu, &cpus_with_pcps)
++ drain_pages(cpu);
++#endif
+ }
+
+ #ifdef CONFIG_HIBERNATION
+@@ -1399,7 +1449,7 @@
+
+ migratetype = get_pfnblock_migratetype(page, pfn);
+ set_freepage_migratetype(page, migratetype);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ __count_vm_event(PGFREE);
+
+ /*
+@@ -1425,12 +1475,17 @@
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+ unsigned long batch = ACCESS_ONCE(pcp->batch);
+- free_pcppages_bulk(zone, batch, pcp);
++ LIST_HEAD(dst);
++
++ isolate_pcp_pages(batch, pcp, &dst);
+ pcp->count -= batch;
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, batch, &dst);
++ return;
+ }
+
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
+@@ -1560,7 +1615,7 @@
+ struct per_cpu_pages *pcp;
+ struct list_head *list;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ if (list_empty(list)) {
+@@ -1592,13 +1647,15 @@
+ */
+ WARN_ON_ONCE(order > 1);
+ }
+- spin_lock_irqsave(&zone->lock, flags);
++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ page = __rmqueue(zone, order, migratetype);
+- spin_unlock(&zone->lock);
+- if (!page)
++ if (!page) {
++ spin_unlock(&zone->lock);
+ goto failed;
++ }
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_freepage_migratetype(page));
++ spin_unlock(&zone->lock);
+ }
+
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+@@ -1608,7 +1665,7 @@
+
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+
+ VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ if (prep_new_page(page, order, gfp_flags))
+@@ -1616,7 +1673,7 @@
+ return page;
+
+ failed:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ return NULL;
+ }
+
+@@ -2327,8 +2384,8 @@
+ count_vm_event(COMPACTSTALL);
+
+ /* Page migration frees to the PCP lists but we want merging */
+- drain_pages(get_cpu());
+- put_cpu();
++ drain_pages(get_cpu_light());
++ put_cpu_light();
+
+ page = get_page_from_freelist(gfp_mask, nodemask,
+ order, zonelist, high_zoneidx,
+@@ -5567,6 +5624,7 @@
+ void __init page_alloc_init(void)
+ {
+ hotcpu_notifier(page_alloc_cpu_notify, 0);
++ local_irq_lock_init(pa_lock);
+ }
+
+ /*
+@@ -6461,7 +6519,7 @@
+ struct per_cpu_pageset *pset;
+
+ /* avoid races with drain_pages() */
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (zone->pageset != &boot_pageset) {
+ for_each_online_cpu(cpu) {
+ pset = per_cpu_ptr(zone->pageset, cpu);
+@@ -6470,7 +6528,7 @@
+ free_percpu(zone->pageset);
+ zone->pageset = &boot_pageset;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ #ifdef CONFIG_MEMORY_HOTREMOVE
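
Every pcp-draining path above now follows the same two-phase shape: detach pages onto a private list under pa_lock, drop it, then let free_pcppages_bulk() take zone->lock with interrupts disabled only around the buddy insertion. Condensed into one helper for illustration, using the names from the hunks above:

	static void drain_pcp(struct zone *zone, struct per_cpu_pages *pcp,
			      int to_free)
	{
		unsigned long flags;
		LIST_HEAD(dst);

		local_lock_irqsave(pa_lock, flags);
		isolate_pcp_pages(to_free, pcp, &dst);	/* pcp lists -> dst */
		pcp->count -= to_free;
		local_unlock_irqrestore(pa_lock, flags);
		free_pcppages_bulk(zone, to_free, &dst);	/* zone->lock only here */
	}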
+diff -Nur linux-3.18.8.orig/mm/slab.h linux-3.18.8/mm/slab.h
+--- linux-3.18.8.orig/mm/slab.h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/slab.h 2015-03-03 08:05:18.000000000 +0100
+@@ -315,7 +315,11 @@
+ * The slab lists for all objects.
+ */
+ struct kmem_cache_node {
++#ifdef CONFIG_SLUB
++ raw_spinlock_t list_lock;
++#else
+ spinlock_t list_lock;
++#endif
+
+ #ifdef CONFIG_SLAB
+ struct list_head slabs_partial; /* partial list first, better asm code */
+diff -Nur linux-3.18.8.orig/mm/slub.c linux-3.18.8/mm/slub.c
+--- linux-3.18.8.orig/mm/slub.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/slub.c 2015-03-03 08:05:18.000000000 +0100
+@@ -1044,7 +1044,7 @@
+ {
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+- spin_lock_irqsave(&n->list_lock, *flags);
++ raw_spin_lock_irqsave(&n->list_lock, *flags);
+ slab_lock(page);
+
+ if (!check_slab(s, page))
+@@ -1091,7 +1091,7 @@
+
+ fail:
+ slab_unlock(page);
+- spin_unlock_irqrestore(&n->list_lock, *flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, *flags);
+ slab_fix(s, "Object at 0x%p not freed", object);
+ return NULL;
+ }
+@@ -1219,6 +1219,12 @@
+
+ #endif /* CONFIG_SLUB_DEBUG */
+
++struct slub_free_list {
++ raw_spinlock_t lock;
++ struct list_head list;
++};
++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
++
+ /*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+@@ -1303,10 +1309,15 @@
+ struct page *page;
+ struct kmem_cache_order_objects oo = s->oo;
+ gfp_t alloc_gfp;
++ bool enableirqs;
+
+ flags &= gfp_allowed_mask;
+
+- if (flags & __GFP_WAIT)
++ enableirqs = (flags & __GFP_WAIT) != 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ enableirqs |= system_state == SYSTEM_RUNNING;
++#endif
++ if (enableirqs)
+ local_irq_enable();
+
+ flags |= s->allocflags;
+@@ -1347,7 +1358,7 @@
+ kmemcheck_mark_unallocated_pages(page, pages);
+ }
+
+- if (flags & __GFP_WAIT)
++ if (enableirqs)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+@@ -1365,8 +1376,10 @@
+ void *object)
+ {
+ setup_object_debug(s, page, object);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (unlikely(s->ctor))
+ s->ctor(object);
++#endif
+ }
+
+ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1442,6 +1455,16 @@
+ memcg_uncharge_slab(s, order);
+ }
+
++static void free_delayed(struct list_head *h)
++{
++ while(!list_empty(h)) {
++ struct page *page = list_first_entry(h, struct page, lru);
++
++ list_del(&page->lru);
++ __free_slab(page->slab_cache, page);
++ }
++}
++
+ #define need_reserve_slab_rcu \
+ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
+@@ -1476,6 +1499,12 @@
+ }
+
+ call_rcu(head, rcu_free_slab);
++ } else if (irqs_disabled()) {
++ struct slub_free_list *f = &__get_cpu_var(slub_free_list);
++
++ raw_spin_lock(&f->lock);
++ list_add(&page->lru, &f->list);
++ raw_spin_unlock(&f->lock);
+ } else
+ __free_slab(s, page);
+ }
+@@ -1589,7 +1618,7 @@
+ if (!n || !n->nr_partial)
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ list_for_each_entry_safe(page, page2, &n->partial, lru) {
+ void *t;
+
+@@ -1614,7 +1643,7 @@
+ break;
+
+ }
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return object;
+ }
+
+@@ -1860,7 +1889,7 @@
+ * that acquire_slab() will see a slab page that
+ * is frozen
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ } else {
+ m = M_FULL;
+@@ -1871,7 +1900,7 @@
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ }
+
+@@ -1906,7 +1935,7 @@
+ goto redo;
+
+ if (lock)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ if (m == M_FREE) {
+ stat(s, DEACTIVATE_EMPTY);
+@@ -1938,10 +1967,10 @@
+ n2 = get_node(s, page_to_nid(page));
+ if (n != n2) {
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ n = n2;
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+
+ do {
+@@ -1970,7 +1999,7 @@
+ }
+
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+@@ -2008,14 +2037,21 @@
+ pobjects = oldpage->pobjects;
+ pages = oldpage->pages;
+ if (drain && pobjects > s->cpu_partial) {
++ struct slub_free_list *f;
+ unsigned long flags;
++ LIST_HEAD(tofree);
+ /*
+ * partial array is full. Move the existing
+ * set to the per node partial list.
+ */
+ local_irq_save(flags);
+ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ f = &__get_cpu_var(slub_free_list);
++ raw_spin_lock(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
++ free_delayed(&tofree);
+ oldpage = NULL;
+ pobjects = 0;
+ pages = 0;
+@@ -2079,7 +2115,22 @@
+
+ static void flush_all(struct kmem_cache *s)
+ {
++ LIST_HEAD(tofree);
++ int cpu;
++
+ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
++ for_each_online_cpu(cpu) {
++ struct slub_free_list *f;
++
++ if (!has_cpu_slab(cpu, s))
++ continue;
++
++ f = &per_cpu(slub_free_list, cpu);
++ raw_spin_lock_irq(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock_irq(&f->lock);
++ free_delayed(&tofree);
++ }
+ }
+
+ /*
+@@ -2115,10 +2166,10 @@
+ unsigned long x = 0;
+ struct page *page;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ x += get_count(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+ }
+ #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
+@@ -2255,9 +2306,11 @@
+ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ unsigned long addr, struct kmem_cache_cpu *c)
+ {
++ struct slub_free_list *f;
+ void *freelist;
+ struct page *page;
+ unsigned long flags;
++ LIST_HEAD(tofree);
+
+ local_irq_save(flags);
+ #ifdef CONFIG_PREEMPT
+@@ -2325,7 +2378,13 @@
+ VM_BUG_ON(!c->page->frozen);
+ c->freelist = get_freepointer(s, freelist);
+ c->tid = next_tid(c->tid);
++out:
++ f = &__get_cpu_var(slub_free_list);
++ raw_spin_lock(&f->lock);
++ list_splice_init(&f->list, &tofree);
++ raw_spin_unlock(&f->lock);
+ local_irq_restore(flags);
++ free_delayed(&tofree);
+ return freelist;
+
+ new_slab:
+@@ -2342,8 +2401,7 @@
+
+ if (unlikely(!freelist)) {
+ slab_out_of_memory(s, gfpflags, node);
+- local_irq_restore(flags);
+- return NULL;
++ goto out;
+ }
+
+ page = c->page;
+@@ -2358,8 +2416,7 @@
+ deactivate_slab(s, page, get_freepointer(s, freelist));
+ c->page = NULL;
+ c->freelist = NULL;
+- local_irq_restore(flags);
+- return freelist;
++ goto out;
+ }
+
+ /*
+@@ -2444,6 +2501,10 @@
+
+ if (unlikely(gfpflags & __GFP_ZERO) && object)
+ memset(object, 0, s->object_size);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (unlikely(s->ctor) && object)
++ s->ctor(object);
++#endif
+
+ slab_post_alloc_hook(s, gfpflags, object);
+
+@@ -2531,7 +2592,7 @@
+
+ do {
+ if (unlikely(n)) {
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ n = NULL;
+ }
+ prior = page->freelist;
+@@ -2563,7 +2624,7 @@
+ * Otherwise the list_lock will synchronize with
+ * other processors updating the list of slabs.
+ */
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ }
+ }
+@@ -2605,7 +2666,7 @@
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
+ }
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return;
+
+ slab_empty:
+@@ -2620,7 +2681,7 @@
+ remove_full(s, n, page);
+ }
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ stat(s, FREE_SLAB);
+ discard_slab(s, page);
+ }
+@@ -2816,7 +2877,7 @@
+ init_kmem_cache_node(struct kmem_cache_node *n)
+ {
+ n->nr_partial = 0;
+- spin_lock_init(&n->list_lock);
++ raw_spin_lock_init(&n->list_lock);
+ INIT_LIST_HEAD(&n->partial);
+ #ifdef CONFIG_SLUB_DEBUG
+ atomic_long_set(&n->nr_slabs, 0);
+@@ -3373,7 +3434,7 @@
+ for (i = 0; i < objects; i++)
+ INIT_LIST_HEAD(slabs_by_inuse + i);
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ /*
+ * Build lists indexed by the items in use in each slab.
+@@ -3394,7 +3455,7 @@
+ for (i = objects - 1; i > 0; i--)
+ list_splice(slabs_by_inuse + i, n->partial.prev);
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ /* Release empty slabs */
+ list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+@@ -3567,6 +3628,12 @@
+ {
+ static __initdata struct kmem_cache boot_kmem_cache,
+ boot_kmem_cache_node;
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
++ }
+
+ if (debug_guardpage_minorder())
+ slub_max_order = 0;
+@@ -3815,7 +3882,7 @@
+ struct page *page;
+ unsigned long flags;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ list_for_each_entry(page, &n->partial, lru) {
+ validate_slab_slab(s, page, map);
+@@ -3837,7 +3904,7 @@
+ s->name, count, atomic_long_read(&n->nr_slabs));
+
+ out:
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return count;
+ }
+
+@@ -4025,12 +4092,12 @@
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ process_slab(&t, s, page, alloc, map);
+ list_for_each_entry(page, &n->full, lru)
+ process_slab(&t, s, page, alloc, map);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ for (i = 0; i < t.count; i++) {
+diff -Nur linux-3.18.8.orig/mm/swap.c linux-3.18.8/mm/swap.c
+--- linux-3.18.8.orig/mm/swap.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/swap.c 2015-03-03 08:05:18.000000000 +0100
+@@ -31,6 +31,7 @@
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
+ #include <linux/uio.h>
++#include <linux/locallock.h>
+
+ #include "internal.h"
+
+@@ -44,6 +45,9 @@
+ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+
++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
++static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);
++
+ /*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+@@ -473,11 +477,11 @@
+ unsigned long flags;
+
+ page_cache_get(page);
+- local_irq_save(flags);
++ local_lock_irqsave(rotate_lock, flags);
+ pvec = this_cpu_ptr(&lru_rotate_pvecs);
+ if (!pagevec_add(pvec, page))
+ pagevec_move_tail(pvec);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(rotate_lock, flags);
+ }
+ }
+
+@@ -528,12 +532,13 @@
+ void activate_page(struct page *page)
+ {
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
++ activate_page_pvecs);
+
+ page_cache_get(page);
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, __activate_page, NULL);
+- put_cpu_var(activate_page_pvecs);
++ put_locked_var(swapvec_lock, activate_page_pvecs);
+ }
+ }
+
+@@ -559,7 +564,7 @@
+
+ static void __lru_cache_activate_page(struct page *page)
+ {
+- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+ int i;
+
+ /*
+@@ -581,7 +586,7 @@
+ }
+ }
+
+- put_cpu_var(lru_add_pvec);
++ put_locked_var(swapvec_lock, lru_add_pvec);
+ }
+
+ /*
+@@ -620,13 +625,13 @@
+
+ static void __lru_cache_add(struct page *page)
+ {
+- struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
+
+ page_cache_get(page);
+ if (!pagevec_space(pvec))
+ __pagevec_lru_add(pvec);
+ pagevec_add(pvec, page);
+- put_cpu_var(lru_add_pvec);
++ put_locked_var(swapvec_lock, lru_add_pvec);
+ }
+
+ /**
+@@ -806,9 +811,9 @@
+ unsigned long flags;
+
+ /* No harm done if a racing interrupt already did this */
+- local_irq_save(flags);
++ local_lock_irqsave(rotate_lock, flags);
+ pagevec_move_tail(pvec);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(rotate_lock, flags);
+ }
+
+ pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+@@ -836,18 +841,19 @@
+ return;
+
+ if (likely(get_page_unless_zero(page))) {
+- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
++ struct pagevec *pvec = &get_locked_var(swapvec_lock,
++ lru_deactivate_pvecs);
+
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+- put_cpu_var(lru_deactivate_pvecs);
++ put_locked_var(swapvec_lock, lru_deactivate_pvecs);
+ }
+ }
+
+ void lru_add_drain(void)
+ {
+- lru_add_drain_cpu(get_cpu());
+- put_cpu();
++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock));
++ local_unlock_cpu(swapvec_lock);
+ }
+
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+diff -Nur linux-3.18.8.orig/mm/truncate.c linux-3.18.8/mm/truncate.c
+--- linux-3.18.8.orig/mm/truncate.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/truncate.c 2015-03-03 08:05:18.000000000 +0100
+@@ -56,8 +56,11 @@
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_shadows(node) &&
+- !list_empty(&node->private_list))
+- list_lru_del(&workingset_shadow_nodes, &node->private_list);
++ !list_empty(&node->private_list)) {
++ local_lock(workingset_shadow_lock);
++ list_lru_del(&__workingset_shadow_nodes, &node->private_list);
++ local_unlock(workingset_shadow_lock);
++ }
+ __radix_tree_delete_node(&mapping->page_tree, node);
+ unlock:
+ spin_unlock_irq(&mapping->tree_lock);
+diff -Nur linux-3.18.8.orig/mm/vmalloc.c linux-3.18.8/mm/vmalloc.c
+--- linux-3.18.8.orig/mm/vmalloc.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/vmalloc.c 2015-03-03 08:05:18.000000000 +0100
+@@ -798,7 +798,7 @@
+ struct vmap_block *vb;
+ struct vmap_area *va;
+ unsigned long vb_idx;
+- int node, err;
++ int node, err, cpu;
+
+ node = numa_node_id();
+
+@@ -836,11 +836,12 @@
+ BUG_ON(err);
+ radix_tree_preload_end();
+
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = &__get_cpu_var(vmap_block_queue);
+ spin_lock(&vbq->lock);
+ list_add_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+
+ return vb;
+ }
+@@ -908,6 +909,7 @@
+ struct vmap_block *vb;
+ unsigned long addr = 0;
+ unsigned int order;
++ int cpu = 0;
+
+ BUG_ON(size & ~PAGE_MASK);
+ BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -923,7 +925,8 @@
+
+ again:
+ rcu_read_lock();
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = &__get_cpu_var(vmap_block_queue);
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ int i;
+
+@@ -947,7 +950,7 @@
+ spin_unlock(&vb->lock);
+ }
+
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+ rcu_read_unlock();
+
+ if (!addr) {
+diff -Nur linux-3.18.8.orig/mm/vmstat.c linux-3.18.8/mm/vmstat.c
+--- linux-3.18.8.orig/mm/vmstat.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/vmstat.c 2015-03-03 08:05:18.000000000 +0100
+@@ -221,6 +221,7 @@
+ long x;
+ long t;
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -230,6 +231,7 @@
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+
+@@ -262,6 +264,7 @@
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -270,6 +273,7 @@
+ zone_page_state_add(v + overstep, zone, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -284,6 +288,7 @@
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -292,6 +297,7 @@
+ zone_page_state_add(v - overstep, zone, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+diff -Nur linux-3.18.8.orig/mm/workingset.c linux-3.18.8/mm/workingset.c
+--- linux-3.18.8.orig/mm/workingset.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/mm/workingset.c 2015-03-03 08:05:18.000000000 +0100
+@@ -264,7 +264,8 @@
+ * point where they would still be useful.
+ */
+
+-struct list_lru workingset_shadow_nodes;
++struct list_lru __workingset_shadow_nodes;
++DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+
+ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+ struct shrink_control *sc)
+@@ -274,9 +275,9 @@
+ unsigned long pages;
+
+ /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+- local_irq_disable();
+- shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
+- local_irq_enable();
++ local_lock_irq(workingset_shadow_lock);
++ shadow_nodes = list_lru_count_node(&__workingset_shadow_nodes, sc->nid);
++ local_unlock_irq(workingset_shadow_lock);
+
+ pages = node_present_pages(sc->nid);
+ /*
+@@ -362,9 +363,9 @@
+ spin_unlock(&mapping->tree_lock);
+ ret = LRU_REMOVED_RETRY;
+ out:
+- local_irq_enable();
++ local_unlock_irq(workingset_shadow_lock);
+ cond_resched();
+- local_irq_disable();
++ local_lock_irq(workingset_shadow_lock);
+ spin_lock(lru_lock);
+ return ret;
+ }
+@@ -375,10 +376,10 @@
+ unsigned long ret;
+
+ /* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+- local_irq_disable();
+- ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
++ local_lock_irq(workingset_shadow_lock);
++ ret = list_lru_walk_node(&__workingset_shadow_nodes, sc->nid,
+ shadow_lru_isolate, NULL, &sc->nr_to_scan);
+- local_irq_enable();
++ local_unlock_irq(workingset_shadow_lock);
+ return ret;
+ }
+
+@@ -399,7 +400,7 @@
+ {
+ int ret;
+
+- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
++ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key);
+ if (ret)
+ goto err;
+ ret = register_shrinker(&workingset_shadow_shrinker);
+@@ -407,7 +408,7 @@
+ goto err_list_lru;
+ return 0;
+ err_list_lru:
+- list_lru_destroy(&workingset_shadow_nodes);
++ list_lru_destroy(&__workingset_shadow_nodes);
+ err:
+ return ret;
+ }
+diff -Nur linux-3.18.8.orig/net/core/dev.c linux-3.18.8/net/core/dev.c
+--- linux-3.18.8.orig/net/core/dev.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/core/dev.c 2015-03-03 08:05:18.000000000 +0100
+@@ -182,6 +182,7 @@
+ static DEFINE_HASHTABLE(napi_hash, 8);
+
+ static seqcount_t devnet_rename_seq;
++static DEFINE_MUTEX(devnet_rename_mutex);
+
+ static inline void dev_base_seq_inc(struct net *net)
+ {
+@@ -203,14 +204,14 @@
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_lock(&sd->input_pkt_queue.lock);
++ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_unlock(&sd->input_pkt_queue.lock);
++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+@@ -832,7 +833,8 @@
+ strcpy(name, dev->name);
+ rcu_read_unlock();
+ if (read_seqcount_retry(&devnet_rename_seq, seq)) {
+- cond_resched();
++ mutex_lock(&devnet_rename_mutex);
++ mutex_unlock(&devnet_rename_mutex);
+ goto retry;
+ }
+
+@@ -1101,20 +1103,17 @@
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+- write_seqcount_begin(&devnet_rename_seq);
++ mutex_lock(&devnet_rename_mutex);
++ __raw_write_seqcount_begin(&devnet_rename_seq);
+
+- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
+- write_seqcount_end(&devnet_rename_seq);
+- return 0;
+- }
++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
++ goto outunlock;
+
+ memcpy(oldname, dev->name, IFNAMSIZ);
+
+ err = dev_get_valid_name(net, dev, newname);
+- if (err < 0) {
+- write_seqcount_end(&devnet_rename_seq);
+- return err;
+- }
++ if (err < 0)
++ goto outunlock;
+
+ if (oldname[0] && !strchr(oldname, '%'))
+ netdev_info(dev, "renamed from %s\n", oldname);
+@@ -1127,11 +1126,12 @@
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ dev->name_assign_type = old_assign_type;
+- write_seqcount_end(&devnet_rename_seq);
+- return ret;
++ err = ret;
++ goto outunlock;
+ }
+
+- write_seqcount_end(&devnet_rename_seq);
++ __raw_write_seqcount_end(&devnet_rename_seq);
++ mutex_unlock(&devnet_rename_mutex);
+
+ netdev_adjacent_rename_links(dev, oldname);
+
+@@ -1152,7 +1152,8 @@
+ /* err >= 0 after dev_alloc_name() or stores the first errno */
+ if (err >= 0) {
+ err = ret;
+- write_seqcount_begin(&devnet_rename_seq);
++ mutex_lock(&devnet_rename_mutex);
++ __raw_write_seqcount_begin(&devnet_rename_seq);
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ memcpy(oldname, newname, IFNAMSIZ);
+ dev->name_assign_type = old_assign_type;
+@@ -1165,6 +1166,11 @@
+ }
+
+ return err;
++
++outunlock:
++ __raw_write_seqcount_end(&devnet_rename_seq);
++ mutex_unlock(&devnet_rename_mutex);
++ return err;
+ }
+
+ /**
+@@ -2160,6 +2166,7 @@
+ sd->output_queue_tailp = &q->next_sched;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -2241,6 +2248,7 @@
+ __this_cpu_write(softnet_data.completion_queue, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
+
+@@ -3336,6 +3344,7 @@
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+@@ -3354,7 +3363,7 @@
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
+
+- preempt_disable();
++ migrate_disable();
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+@@ -3364,13 +3373,13 @@
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+
+ rcu_read_unlock();
+- preempt_enable();
++ migrate_enable();
+ } else
+ #endif
+ {
+ unsigned int qtail;
+- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+- put_cpu();
++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
++ put_cpu_light();
+ }
+ return ret;
+ }
+@@ -3404,16 +3413,44 @@
+
+ trace_netif_rx_ni_entry(skb);
+
+- preempt_disable();
++ local_bh_disable();
+ err = netif_rx_internal(skb);
+- if (local_softirq_pending())
+- do_softirq();
+- preempt_enable();
++ local_bh_enable();
+
+ return err;
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT runs ksoftirqd as a real time thread and the root_lock is a
++ * "sleeping spinlock". If the trylock fails then we can go into an
++ * infinite loop when ksoftirqd preempted the task which actually
++ * holds the lock, because we requeue q and raise NET_TX softirq
++ * causing ksoftirqd to loop forever.
++ *
++ * It's safe to use spin_lock on RT here as softirqs run in thread
++ * context and cannot deadlock against the thread which is holding
++ * root_lock.
++ *
++ * On !RT the trylock might fail, but there we bail out from the
++ * softirq loop after 10 attempts which we can't do on RT. And the
++ * task holding root_lock cannot be preempted, so the only downside of
++ * that trylock is that we need 10 loops to decide that we should have
++ * given up in the first one :)
++ */
++static inline int take_root_lock(spinlock_t *lock)
++{
++ spin_lock(lock);
++ return 1;
++}
++#else
++static inline int take_root_lock(spinlock_t *lock)
++{
++ return spin_trylock(lock);
++}
++#endif
++
+ static void net_tx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+@@ -3455,7 +3492,7 @@
+ head = head->next_sched;
+
+ root_lock = qdisc_lock(q);
+- if (spin_trylock(root_lock)) {
++ if (take_root_lock(root_lock)) {
+ smp_mb__before_atomic();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+@@ -3848,7 +3885,7 @@
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
+@@ -3857,10 +3894,13 @@
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->process_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
++
++ if (!skb_queue_empty(&sd->tofree_queue))
++ raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+
+ static int napi_gro_complete(struct sk_buff *skb)
+@@ -4323,6 +4363,7 @@
+ } else
+ #endif
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -4394,6 +4435,7 @@
+ local_irq_save(flags);
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -4516,10 +4558,17 @@
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies + 2;
+ int budget = netdev_budget;
++ struct sk_buff *skb;
+ void *have;
+
+ local_irq_disable();
+
++ while ((skb = __skb_dequeue(&sd->tofree_queue))) {
++ local_irq_enable();
++ kfree_skb(skb);
++ local_irq_disable();
++ }
++
+ while (!list_empty(&sd->poll_list)) {
+ struct napi_struct *n;
+ int work, weight;
+@@ -7008,6 +7057,7 @@
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->process_queue))) {
+@@ -7018,6 +7068,9 @@
+ netif_rx_internal(skb);
+ input_queue_head_incr(oldsd);
+ }
++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
++ kfree_skb(skb);
++ }
+
+ return NOTIFY_OK;
+ }
+@@ -7319,8 +7372,9 @@
+ for_each_possible_cpu(i) {
+ struct softnet_data *sd = &per_cpu(softnet_data, i);
+
+- skb_queue_head_init(&sd->input_pkt_queue);
+- skb_queue_head_init(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->input_pkt_queue);
++ skb_queue_head_init_raw(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->tofree_queue);
+ INIT_LIST_HEAD(&sd->poll_list);
+ sd->output_queue_tailp = &sd->output_queue;
+ #ifdef CONFIG_RPS
+diff -Nur linux-3.18.8.orig/net/core/skbuff.c linux-3.18.8/net/core/skbuff.c
+--- linux-3.18.8.orig/net/core/skbuff.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/core/skbuff.c 2015-03-03 08:05:18.000000000 +0100
+@@ -63,6 +63,7 @@
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
+ #include <linux/if_vlan.h>
++#include <linux/locallock.h>
+
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -336,6 +337,7 @@
+ unsigned int pagecnt_bias;
+ };
+ static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+
+ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ {
+@@ -344,7 +346,7 @@
+ int order;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(netdev_alloc_lock, flags);
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ if (unlikely(!nc->frag.page)) {
+ refill:
+@@ -389,7 +391,7 @@
+ nc->frag.offset += fragsz;
+ nc->pagecnt_bias--;
+ end:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(netdev_alloc_lock, flags);
+ return data;
+ }
+
+diff -Nur linux-3.18.8.orig/net/core/sock.c linux-3.18.8/net/core/sock.c
+--- linux-3.18.8.orig/net/core/sock.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/core/sock.c 2015-03-03 08:05:18.000000000 +0100
+@@ -2326,12 +2326,11 @@
+ if (sk->sk_lock.owned)
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+- spin_unlock(&sk->sk_lock.slock);
++ spin_unlock_bh(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+- local_bh_enable();
+ }
+ EXPORT_SYMBOL(lock_sock_nested);
+
+diff -Nur linux-3.18.8.orig/net/ipv4/icmp.c linux-3.18.8/net/ipv4/icmp.c
+--- linux-3.18.8.orig/net/ipv4/icmp.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/ipv4/icmp.c 2015-03-03 08:05:18.000000000 +0100
+@@ -69,6 +69,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/fcntl.h>
++#include <linux/sysrq.h>
+ #include <linux/socket.h>
+ #include <linux/in.h>
+ #include <linux/inet.h>
+@@ -864,6 +865,30 @@
+ }
+
+ /*
++ * 32-bit and 64-bit have different timestamp lengths, so we check for
++ * the cookie at offset 20 and verify it is repeated at offset 50
++ */
++#define CO_POS0 20
++#define CO_POS1 50
++#define CO_SIZE sizeof(int)
++#define ICMP_SYSRQ_SIZE 57
++
++/*
++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
++ * pattern and if it matches send the next byte as a trigger to sysrq.
++ */
++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
++{
++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
++ char *p = skb->data;
++
++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
++ handle_sysrq(p[CO_POS0 + CO_SIZE]);
++}
++
++/*
+ * Handle ICMP_ECHO ("ping") requests.
+ *
+ * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
+@@ -890,6 +915,11 @@
+ icmp_param.data_len = skb->len;
+ icmp_param.head_len = sizeof(struct icmphdr);
+ icmp_reply(&icmp_param, skb);
++
++ if (skb->len == ICMP_SYSRQ_SIZE &&
++ net->ipv4.sysctl_icmp_echo_sysrq) {
++ icmp_check_sysrq(net, skb);
++ }
+ }
+ }
+
+diff -Nur linux-3.18.8.orig/net/ipv4/ip_output.c linux-3.18.8/net/ipv4/ip_output.c
+--- linux-3.18.8.orig/net/ipv4/ip_output.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/ipv4/ip_output.c 2015-03-03 21:30:51.000000000 +0100
+@@ -79,6 +79,7 @@
+ #include <linux/mroute.h>
+ #include <linux/netlink.h>
+ #include <linux/tcp.h>
++#include <linux/locallock.h>
+
+ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
+ EXPORT_SYMBOL(sysctl_ip_default_ttl);
+@@ -1507,6 +1508,7 @@
+ * Generic function to send a packet as reply to another packet.
+ * Used to send some TCP resets/acks so far.
+ */
++
+ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
+ const struct ip_options *sopt,
+ __be32 daddr, __be32 saddr,
+diff -Nur linux-3.18.8.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.8/net/ipv4/sysctl_net_ipv4.c
+--- linux-3.18.8.orig/net/ipv4/sysctl_net_ipv4.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/ipv4/sysctl_net_ipv4.c 2015-03-03 08:05:18.000000000 +0100
+@@ -779,6 +779,13 @@
+ .proc_handler = proc_dointvec
+ },
+ {
++ .procname = "icmp_echo_sysrq",
++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
+ .procname = "icmp_ignore_bogus_error_responses",
+ .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
+ .maxlen = sizeof(int),
+diff -Nur linux-3.18.8.orig/net/mac80211/rx.c linux-3.18.8/net/mac80211/rx.c
+--- linux-3.18.8.orig/net/mac80211/rx.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/mac80211/rx.c 2015-03-03 08:05:18.000000000 +0100
+@@ -3356,7 +3356,7 @@
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+
+- WARN_ON_ONCE(softirq_count() == 0);
++ WARN_ON_ONCE_NONRT(softirq_count() == 0);
+
+ if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
+ goto drop;
+diff -Nur linux-3.18.8.orig/net/netfilter/core.c linux-3.18.8/net/netfilter/core.c
+--- linux-3.18.8.orig/net/netfilter/core.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/netfilter/core.c 2015-03-03 08:05:18.000000000 +0100
+@@ -21,11 +21,17 @@
+ #include <linux/proc_fs.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/locallock.h>
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+
+ #include "nf_internals.h"
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
++EXPORT_PER_CPU_SYMBOL(xt_write_lock);
++#endif
++
+ static DEFINE_MUTEX(afinfo_mutex);
+
+ const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
+diff -Nur linux-3.18.8.orig/net/packet/af_packet.c linux-3.18.8/net/packet/af_packet.c
+--- linux-3.18.8.orig/net/packet/af_packet.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/packet/af_packet.c 2015-03-03 08:05:18.000000000 +0100
+@@ -63,6 +63,7 @@
+ #include <linux/if_packet.h>
+ #include <linux/wireless.h>
+ #include <linux/kernel.h>
++#include <linux/delay.h>
+ #include <linux/kmod.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+@@ -692,7 +693,7 @@
+ if (BLOCK_NUM_PKTS(pbd)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
+@@ -943,7 +944,7 @@
+ if (!(status & TP_STATUS_BLK_TMO)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+ prb_close_block(pkc, pbd, po, status);
+diff -Nur linux-3.18.8.orig/net/rds/ib_rdma.c linux-3.18.8/net/rds/ib_rdma.c
+--- linux-3.18.8.orig/net/rds/ib_rdma.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/rds/ib_rdma.c 2015-03-03 08:05:18.000000000 +0100
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+ #include <linux/llist.h>
++#include <linux/delay.h>
+
+ #include "rds.h"
+ #include "ib.h"
+@@ -286,7 +287,7 @@
+ for_each_online_cpu(cpu) {
+ flag = &per_cpu(clean_list_grace, cpu);
+ while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
+diff -Nur linux-3.18.8.orig/net/sched/sch_generic.c linux-3.18.8/net/sched/sch_generic.c
+--- linux-3.18.8.orig/net/sched/sch_generic.c 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/net/sched/sch_generic.c 2015-03-03 08:05:18.000000000 +0100
+@@ -894,7 +894,7 @@
+ /* Wait for outstanding qdisc_run calls. */
+ list_for_each_entry(dev, head, close_list)
+ while (some_qdisc_is_busy(dev))
+- yield();
++ msleep(1);
+ }
+
+ void dev_deactivate(struct net_device *dev)
+diff -Nur linux-3.18.8.orig/scripts/mkcompile_h linux-3.18.8/scripts/mkcompile_h
+--- linux-3.18.8.orig/scripts/mkcompile_h 2015-02-27 02:49:36.000000000 +0100
++++ linux-3.18.8/scripts/mkcompile_h 2015-03-03 08:05:18.000000000 +0100
+@@ -4,7 +4,8 @@
+ ARCH=$2
+ SMP=$3
+ PREEMPT=$4
+-CC=$5
++RT=$5
++CC=$6
+
+ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
+
+@@ -57,6 +58,7 @@
+ CONFIG_FLAGS=""
+ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+ if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
+ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
+
+ # Truncate to maximum length
diff --git a/target/linux/patches/3.18.9/patch-yaffs2 b/target/linux/patches/3.18.9/patch-yaffs2
new file mode 100644
index 000000000..bb244c7ca
--- /dev/null
+++ b/target/linux/patches/3.18.9/patch-yaffs2
@@ -0,0 +1,16551 @@
+diff -Nur linux-3.15-rc5.orig/fs/Kconfig linux-3.15-rc5/fs/Kconfig
+--- linux-3.15-rc5.orig/fs/Kconfig 2014-05-09 22:10:52.000000000 +0200
++++ linux-3.15-rc5/fs/Kconfig 2014-05-17 01:53:17.000000000 +0200
+@@ -190,6 +190,7 @@
+ source "fs/befs/Kconfig"
+ source "fs/bfs/Kconfig"
+ source "fs/efs/Kconfig"
++source "fs/yaffs2/Kconfig"
+ source "fs/jffs2/Kconfig"
+ # UBIFS File system configuration
+ source "fs/ubifs/Kconfig"
+diff -Nur linux-3.15-rc5.orig/fs/Makefile linux-3.15-rc5/fs/Makefile
+--- linux-3.15-rc5.orig/fs/Makefile 2014-05-09 22:10:52.000000000 +0200
++++ linux-3.15-rc5/fs/Makefile 2014-05-17 01:53:25.000000000 +0200
+@@ -126,3 +126,4 @@
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
+ obj-$(CONFIG_EFIVAR_FS) += efivarfs/
++obj-$(CONFIG_YAFFS_FS) += yaffs2/
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/Kconfig linux-3.15-rc5/fs/yaffs2/Kconfig
+--- linux-3.15-rc5.orig/fs/yaffs2/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/Kconfig 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,171 @@
++#
++# yaffs file system configurations
++#
++
++config YAFFS_FS
++ tristate "yaffs2 file system support"
++ default n
++ depends on MTD_BLOCK
++ select YAFFS_YAFFS1
++ select YAFFS_YAFFS2
++ help
++ yaffs2, or Yet Another Flash File System, is a file system
++ optimised for NAND Flash chips.
++
++ To compile the yaffs2 file system support as a module, choose M
++ here: the module will be called yaffs2.
++
++ If unsure, say N.
++
++ Further information on yaffs2 is available at
++ <http://www.aleph1.co.uk/yaffs/>.
++
++config YAFFS_YAFFS1
++ bool "512 byte / page devices"
++ depends on YAFFS_FS
++ default y
++ help
++ Enable yaffs1 support -- yaffs for 512 byte / page devices
++
++ Not needed for 2K-page devices.
++
++ If unsure, say Y.
++
++config YAFFS_9BYTE_TAGS
++ bool "Use older-style on-NAND data format with pageStatus byte"
++ depends on YAFFS_YAFFS1
++ default n
++ help
++
++ Older-style on-NAND data format has a "pageStatus" byte to record
++ chunk/page state. This byte is zero when the page is discarded.
++ Choose this option if you have existing on-NAND data using this
++ format that you need to continue to support. New data written
++ also uses the older-style format. Note: Use of this option
++ generally requires that MTD's oob layout be adjusted to use the
++ older-style format. See notes on tags formats and MTD versions
++ in yaffs_mtdif1.c.
++
++ If unsure, say N.
++
++config YAFFS_DOES_ECC
++ bool "Lets yaffs do its own ECC"
++ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
++ default n
++ help
++ This enables yaffs to use its own ECC functions instead of using
++ the ones from the generic MTD-NAND driver.
++
++ If unsure, say N.
++
++config YAFFS_ECC_WRONG_ORDER
++ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
++ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
++ default n
++ help
++ This makes yaffs_ecc.c use the same ecc byte order as Steven
++ Hill's nand_ecc.c. If not set, then you get the same ecc byte
++ order as SmartMedia.
++
++ If unsure, say N.
++
++config YAFFS_YAFFS2
++ bool "2048 byte (or larger) / page devices"
++ depends on YAFFS_FS
++ default y
++ help
++ Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
++
++ If unsure, say Y.
++
++config YAFFS_AUTO_YAFFS2
++ bool "Autoselect yaffs2 format"
++ depends on YAFFS_YAFFS2
++ default y
++ help
++	  Without this, you need to explicitly use yaffs2 as the file
++ system type. With this, you can say "yaffs" and yaffs or yaffs2
++ will be used depending on the device page size (yaffs on
++ 512-byte page devices, yaffs2 on 2K page devices).
++
++ If unsure, say Y.
++
++config YAFFS_DISABLE_TAGS_ECC
++ bool "Disable yaffs from doing ECC on tags by default"
++ depends on YAFFS_FS && YAFFS_YAFFS2
++ default n
++ help
++ This defaults yaffs to using its own ECC calculations on tags instead of
++ just relying on the MTD.
++ This behavior can also be overridden with tags_ecc_on and
++ tags_ecc_off mount options.
++
++ If unsure, say N.
++
++config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++ bool "Force chunk erase check"
++ depends on YAFFS_FS
++ default n
++ help
++ Normally yaffs only checks chunks before writing until an erased
++ chunk is found. This helps to detect any partially written
++ chunks that might have happened due to power loss.
++
++ Enabling this forces on the test that chunks are erased in flash
++ before writing to them. This takes more time but is potentially
++ a bit more secure.
++
++	  Suggest setting this to Y during development, while ironing out
++	  driver issues etc. Suggest setting it to N if you want faster writing.
++
++ If unsure, say Y.
++
++config YAFFS_EMPTY_LOST_AND_FOUND
++ bool "Empty lost and found on boot"
++ depends on YAFFS_FS
++ default n
++ help
++	  If this is enabled then the contents of lost and found are
++	  automatically dumped at mount.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BLOCK_REFRESHING
++ bool "Disable yaffs2 block refreshing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then block refreshing is disabled.
++ Block refreshing infrequently refreshes the oldest block in
++ a yaffs2 file system. This mechanism helps to refresh flash to
++ mitigate against data loss. This is particularly useful for MLC.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BACKGROUND
++ bool "Disable yaffs2 background processing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then background processing is disabled.
++ Background processing makes many foreground activities faster.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BAD_BLOCK_MARKING
++ bool "Disable yaffs2 bad block marking"
++ depends on YAFFS_FS
++ default n
++ help
++ Useful during early flash bring up to prevent problems causing
++ lots of bad block marking.
++
++ If unsure, say N.
++
++config YAFFS_XATTR
++ bool "Enable yaffs2 xattr support"
++ depends on YAFFS_FS
++ default y
++ help
++ If this is set then yaffs2 will provide xattr support.
++ If unsure, say Y.
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/Makefile linux-3.15-rc5/fs/yaffs2/Makefile
+--- linux-3.15-rc5.orig/fs/yaffs2/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/Makefile 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,18 @@
++#
++# Makefile for the linux YAFFS filesystem routines.
++#
++
++obj-$(CONFIG_YAFFS_FS) += yaffs.o
++
++yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
++yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
++yaffs-y += yaffs_mtdif.o
++yaffs-y += yaffs_nameval.o yaffs_attribs.o
++yaffs-y += yaffs_allocator.o
++yaffs-y += yaffs_yaffs1.o
++yaffs-y += yaffs_yaffs2.o
++yaffs-y += yaffs_bitmap.o
++yaffs-y += yaffs_summary.o
++yaffs-y += yaffs_verify.o
++
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_allocator.c linux-3.15-rc5/fs/yaffs2/yaffs_allocator.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_allocator.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_allocator.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,357 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++
++/*
++ * Each entry in yaffs_tnode_list and yaffs_obj_list hold blocks
++ * of approx 100 objects that are then allocated singly.
++ * This is basically a simplified slab allocator.
++ *
++ * We don't use the Linux slab allocator because slab does not allow
++ * us to dump all the objects in one hit when we do a umount and tear
++ * down all the tnodes and objects. slab requires that we first free
++ * the individual objects.
++ *
++ * Once yaffs has been mainlined I shall try to motivate for a change
++ * to slab to provide the extra features we need here.
++ */
++
++struct yaffs_tnode_list {
++ struct yaffs_tnode_list *next;
++ struct yaffs_tnode *tnodes;
++};
++
++struct yaffs_obj_list {
++ struct yaffs_obj_list *next;
++ struct yaffs_obj *objects;
++};
++
++struct yaffs_allocator {
++ int n_tnodes_created;
++ struct yaffs_tnode *free_tnodes;
++ int n_free_tnodes;
++ struct yaffs_tnode_list *alloc_tnode_list;
++
++ int n_obj_created;
++ struct list_head free_objs;
++ int n_free_objects;
++
++ struct yaffs_obj_list *allocated_obj_list;
++};
++
++static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator =
++ (struct yaffs_allocator *)dev->allocator;
++ struct yaffs_tnode_list *tmp;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ while (allocator->alloc_tnode_list) {
++ tmp = allocator->alloc_tnode_list->next;
++
++ kfree(allocator->alloc_tnode_list->tnodes);
++ kfree(allocator->alloc_tnode_list);
++ allocator->alloc_tnode_list = tmp;
++ }
++
++ allocator->free_tnodes = NULL;
++ allocator->n_free_tnodes = 0;
++ allocator->n_tnodes_created = 0;
++}
++
++static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ allocator->alloc_tnode_list = NULL;
++ allocator->free_tnodes = NULL;
++ allocator->n_free_tnodes = 0;
++ allocator->n_tnodes_created = 0;
++}
++
++static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
++{
++ struct yaffs_allocator *allocator =
++ (struct yaffs_allocator *)dev->allocator;
++ int i;
++ struct yaffs_tnode *new_tnodes;
++ u8 *mem;
++ struct yaffs_tnode *curr;
++ struct yaffs_tnode *next;
++ struct yaffs_tnode_list *tnl;
++
++ if (!allocator) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_tnodes < 1)
++ return YAFFS_OK;
++
++ /* make these things */
++ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
++ mem = (u8 *) new_tnodes;
++
++ if (!new_tnodes) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs: Could not allocate Tnodes");
++ return YAFFS_FAIL;
++ }
++
++ /* New hookup for wide tnodes */
++ for (i = 0; i < n_tnodes - 1; i++) {
++ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
++ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
++ curr->internal[0] = next;
++ }
++
++ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
++ curr->internal[0] = allocator->free_tnodes;
++ allocator->free_tnodes = (struct yaffs_tnode *)mem;
++
++ allocator->n_free_tnodes += n_tnodes;
++ allocator->n_tnodes_created += n_tnodes;
++
++ /* Now add this bunch of tnodes to a list for freeing up.
++ * NB If we can't add this to the management list it isn't fatal
++ * but it just means we can't free this bunch of tnodes later.
++ */
++ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
++ if (!tnl) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Could not add tnodes to management list");
++ return YAFFS_FAIL;
++ } else {
++ tnl->tnodes = new_tnodes;
++ tnl->next = allocator->alloc_tnode_list;
++ allocator->alloc_tnode_list = tnl;
++ }
++
++ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
++
++ return YAFFS_OK;
++}
++
++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator =
++ (struct yaffs_allocator *)dev->allocator;
++ struct yaffs_tnode *tn = NULL;
++
++ if (!allocator) {
++ BUG();
++ return NULL;
++ }
++
++ /* If there are none left make more */
++ if (!allocator->free_tnodes)
++ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
++
++ if (allocator->free_tnodes) {
++ tn = allocator->free_tnodes;
++ allocator->free_tnodes = allocator->free_tnodes->internal[0];
++ allocator->n_free_tnodes--;
++ }
++
++ return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ if (tn) {
++ tn->internal[0] = allocator->free_tnodes;
++ allocator->free_tnodes = tn;
++ allocator->n_free_tnodes++;
++ }
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++}
++
++/*--------------- yaffs_obj allocation ------------------------
++ *
++ * Free yaffs_objs are stored in a list using obj->siblings.
++ * The blocks of allocated objects are stored in a linked list.
++ */
++
++static void yaffs_init_raw_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ allocator->allocated_obj_list = NULL;
++ INIT_LIST_HEAD(&allocator->free_objs);
++ allocator->n_free_objects = 0;
++}
++
++static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++ struct yaffs_obj_list *tmp;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ while (allocator->allocated_obj_list) {
++ tmp = allocator->allocated_obj_list->next;
++ kfree(allocator->allocated_obj_list->objects);
++ kfree(allocator->allocated_obj_list);
++ allocator->allocated_obj_list = tmp;
++ }
++
++ INIT_LIST_HEAD(&allocator->free_objs);
++ allocator->n_free_objects = 0;
++ allocator->n_obj_created = 0;
++}
++
++static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++ int i;
++ struct yaffs_obj *new_objs;
++ struct yaffs_obj_list *list;
++
++ if (!allocator) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_obj < 1)
++ return YAFFS_OK;
++
++ /* make these things */
++ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
++ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
++
++ if (!new_objs || !list) {
++ kfree(new_objs);
++ new_objs = NULL;
++ kfree(list);
++ list = NULL;
++ yaffs_trace(YAFFS_TRACE_ALLOCATE,
++ "Could not allocate more objects");
++ return YAFFS_FAIL;
++ }
++
++ /* Hook them into the free list */
++ for (i = 0; i < n_obj; i++)
++ list_add(&new_objs[i].siblings, &allocator->free_objs);
++
++ allocator->n_free_objects += n_obj;
++ allocator->n_obj_created += n_obj;
++
++ /* Now add this bunch of Objects to a list for freeing up. */
++
++ list->objects = new_objs;
++ list->next = allocator->allocated_obj_list;
++ allocator->allocated_obj_list = list;
++
++ return YAFFS_OK;
++}
++
++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj = NULL;
++ struct list_head *lh;
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return obj;
++ }
++
++ /* If there are none left make more */
++ if (list_empty(&allocator->free_objs))
++ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
++
++ if (!list_empty(&allocator->free_objs)) {
++ lh = allocator->free_objs.next;
++ obj = list_entry(lh, struct yaffs_obj, siblings);
++ list_del_init(lh);
++ allocator->n_free_objects--;
++ }
++
++ return obj;
++}
++
++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
++{
++
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ /* Link into the free list. */
++ list_add(&obj->siblings, &allocator->free_objs);
++ allocator->n_free_objects++;
++}
++
++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
++{
++
++ if (!dev->allocator) {
++ BUG();
++ return;
++ }
++
++ yaffs_deinit_raw_tnodes(dev);
++ yaffs_deinit_raw_objs(dev);
++ kfree(dev->allocator);
++ dev->allocator = NULL;
++}
++
++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator;
++
++ if (dev->allocator) {
++ BUG();
++ return;
++ }
++
++ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
++ if (allocator) {
++ dev->allocator = allocator;
++ yaffs_init_raw_tnodes(dev);
++ yaffs_init_raw_objs(dev);
++ }
++}
++
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_allocator.h linux-3.15-rc5/fs/yaffs2/yaffs_allocator.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_allocator.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_allocator.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,30 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ALLOCATOR_H__
++#define __YAFFS_ALLOCATOR_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
++
++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
++
++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_attribs.c linux-3.15-rc5/fs/yaffs2/yaffs_attribs.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_attribs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_attribs.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,166 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_attribs.h"
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++static inline uid_t ia_uid_read(const struct iattr *iattr)
++{
++ return from_kuid(&init_user_ns, iattr->ia_uid);
++}
++
++static inline gid_t ia_gid_read(const struct iattr *iattr)
++{
++ return from_kgid(&init_user_ns, iattr->ia_gid);
++}
++
++static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
++{
++ iattr->ia_uid = make_kuid(&init_user_ns, uid);
++}
++
++static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
++{
++ iattr->ia_gid = make_kgid(&init_user_ns, gid);
++}
++#else
++static inline uid_t ia_uid_read(const struct iattr *iattr)
++{
++ return iattr->ia_uid;
++}
++
++static inline gid_t ia_gid_read(const struct iattr *iattr)
++{
++ return iattr->ia_gid;
++}
++
++static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
++{
++ iattr->ia_uid = uid;
++}
++
++static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
++{
++ iattr->ia_gid = gid;
++}
++#endif
++
++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
++{
++ obj->yst_uid = oh->yst_uid;
++ obj->yst_gid = oh->yst_gid;
++ obj->yst_atime = oh->yst_atime;
++ obj->yst_mtime = oh->yst_mtime;
++ obj->yst_ctime = oh->yst_ctime;
++ obj->yst_rdev = oh->yst_rdev;
++}
++
++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
++{
++ oh->yst_uid = obj->yst_uid;
++ oh->yst_gid = obj->yst_gid;
++ oh->yst_atime = obj->yst_atime;
++ oh->yst_mtime = obj->yst_mtime;
++ oh->yst_ctime = obj->yst_ctime;
++ oh->yst_rdev = obj->yst_rdev;
++
++}
++
++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
++{
++ obj->yst_mtime = Y_CURRENT_TIME;
++ if (do_a)
++ obj->yst_atime = obj->yst_mtime;
++ if (do_c)
++ obj->yst_ctime = obj->yst_mtime;
++}
++
++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
++{
++ yaffs_load_current_time(obj, 1, 1);
++ obj->yst_rdev = rdev;
++ obj->yst_uid = uid;
++ obj->yst_gid = gid;
++}
++
++static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
++{
++ YCHAR *alias = NULL;
++ obj = yaffs_get_equivalent_obj(obj);
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return obj->variant.file_variant.file_size;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = obj->variant.symlink_variant.alias;
++ if (!alias)
++ return 0;
++ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
++ default:
++ return 0;
++ }
++}
++
++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
++{
++ unsigned int valid = attr->ia_valid;
++
++ if (valid & ATTR_MODE)
++ obj->yst_mode = attr->ia_mode;
++ if (valid & ATTR_UID)
++ obj->yst_uid = ia_uid_read(attr);
++ if (valid & ATTR_GID)
++ obj->yst_gid = ia_gid_read(attr);
++
++ if (valid & ATTR_ATIME)
++ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
++ if (valid & ATTR_CTIME)
++ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
++ if (valid & ATTR_MTIME)
++ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
++
++ if (valid & ATTR_SIZE)
++ yaffs_resize_file(obj, attr->ia_size);
++
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++ return YAFFS_OK;
++
++}
++
++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
++{
++ unsigned int valid = 0;
++
++ attr->ia_mode = obj->yst_mode;
++ valid |= ATTR_MODE;
++ ia_uid_write(attr, obj->yst_uid);
++ valid |= ATTR_UID;
++ ia_gid_write(attr, obj->yst_gid);
++ valid |= ATTR_GID;
++
++ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
++ valid |= ATTR_ATIME;
++ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
++ valid |= ATTR_CTIME;
++ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
++ valid |= ATTR_MTIME;
++
++ attr->ia_size = yaffs_get_file_size(obj);
++ valid |= ATTR_SIZE;
++
++ attr->ia_valid = valid;
++
++ return YAFFS_OK;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_attribs.h linux-3.15-rc5/fs/yaffs2/yaffs_attribs.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_attribs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_attribs.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ATTRIBS_H__
++#define __YAFFS_ATTRIBS_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_bitmap.c linux-3.15-rc5/fs/yaffs2/yaffs_bitmap.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_bitmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_bitmap.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,97 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_bitmap.h"
++#include "yaffs_trace.h"
++/*
++ * Chunk bitmap manipulations
++ */
++
++static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "BlockBits block %d is not valid",
++ blk);
++ BUG();
++ }
++ return dev->chunk_bits +
++ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
++}
++
++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
++ chunk < 0 || chunk >= dev->param.chunks_per_block) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Chunk Id (%d:%d) invalid",
++ blk, chunk);
++ BUG();
++ }
++}
++
++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ memset(blk_bits, 0, dev->chunk_bit_stride);
++}
++
++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++ blk_bits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++ int i;
++
++ for (i = 0; i < dev->chunk_bit_stride; i++) {
++ if (*blk_bits)
++ return 1;
++ blk_bits++;
++ }
++ return 0;
++}
++
++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++ int i;
++ int n = 0;
++
++ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
++ n += hweight8(*blk_bits);
++
++ return n;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_bitmap.h linux-3.15-rc5/fs/yaffs2/yaffs_bitmap.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_bitmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_bitmap.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * Chunk bitmap manipulations
++ */
++
++#ifndef __YAFFS_BITMAP_H__
++#define __YAFFS_BITMAP_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_checkptrw.c linux-3.15-rc5/fs/yaffs2/yaffs_checkptrw.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_checkptrw.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_checkptrw.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,474 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_checkptrw.h"
++#include "yaffs_getblockinfo.h"
++
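++/*
++ * Each checkpoint chunk begins with this header. It is written by
++ * yaffs2_checkpt_init_chunk_hdr() and verified on read by
++ * yaffs2_checkpt_check_chunk_hdr(); the running sum and xor let the
++ * reader detect a corrupt or stale checkpoint stream.
++ */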
++struct yaffs_checkpt_chunk_hdr {
++ int version;
++ int seq;
++ u32 sum;
++ u32 xor;
++};
++
++
++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
++{
++ return chunk - dev->chunk_offset;
++}
++
++static int apply_block_offset(struct yaffs_dev *dev, int block)
++{
++ return block - dev->block_offset;
++}
++
++static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_chunk_hdr hdr;
++
++ hdr.version = YAFFS_CHECKPOINT_VERSION;
++ hdr.seq = dev->checkpt_page_seq;
++ hdr.sum = dev->checkpt_sum;
++ hdr.xor = dev->checkpt_xor;
++
++ dev->checkpt_byte_offs = sizeof(hdr);
++
++ memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
++}
++
++static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_chunk_hdr hdr;
++
++ memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
++
++ dev->checkpt_byte_offs = sizeof(hdr);
++
++ return hdr.version == YAFFS_CHECKPOINT_VERSION &&
++ hdr.seq == dev->checkpt_page_seq &&
++ hdr.sum == dev->checkpt_sum &&
++ hdr.xor == dev->checkpt_xor;
++}
++
++static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
++{
++ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checkpt blocks_avail = %d", blocks_avail);
++
++ return (blocks_avail <= 0) ? 0 : 1;
++}
++
++static int yaffs_checkpt_erase(struct yaffs_dev *dev)
++{
++ int i;
++
++ if (!dev->drv.drv_erase_fn)
++ return 0;
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checking blocks %d to %d",
++ dev->internal_start_block, dev->internal_end_block);
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
++ int offset_i = apply_block_offset(dev, i);
++ int result;
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "erasing checkpt block %d", i);
++
++ dev->n_erasures++;
++
++ result = dev->drv.drv_erase_fn(dev, offset_i);
++ if(result) {
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ dev->n_free_chunks +=
++ dev->param.chunks_per_block;
++ } else {
++ dev->drv.drv_mark_bad_fn(dev, offset_i);
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++ }
++ }
++ }
++
++ dev->blocks_in_checkpt = 0;
++
++ return 1;
++}
++
++static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
++{
++ int i;
++ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
++ dev->n_erased_blocks, dev->param.n_reserved_blocks,
++ blocks_avail, dev->checkpt_next_block);
++
++ if (dev->checkpt_next_block >= 0 &&
++ dev->checkpt_next_block <= dev->internal_end_block &&
++ blocks_avail > 0) {
++
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
++ i++) {
++ struct yaffs_block_info *bi;
++
++ bi = yaffs_get_block_info(dev, i);
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ dev->checkpt_next_block = i + 1;
++ dev->checkpt_cur_block = i;
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "allocating checkpt block %d", i);
++ return;
++ }
++ }
++ }
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
++
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
++}
++
++static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
++{
++ int i;
++ struct yaffs_ext_tags tags;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "find next checkpt block: start: blocks %d next %d",
++ dev->blocks_in_checkpt, dev->checkpt_next_block);
++
++ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
++ i++) {
++ int chunk = i * dev->param.chunks_per_block;
++ enum yaffs_block_state state;
++ u32 seq;
++
++			dev->tagger.read_chunk_tags_fn(dev,
++				apply_chunk_offset(dev, chunk),
++				NULL, &tags);
++			dev->tagger.query_block_fn(dev,
++				apply_block_offset(dev, i),
++				&state, &seq);
++			yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++				"find next checkpt block: search: block %d state %d oid %d seq %d eccr %d",
++				i, (int) state,
++				tags.obj_id, tags.seq_number,
++				tags.ecc_result);
++
++			if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++				continue;
++
++			if (state == YAFFS_BLOCK_STATE_DEAD)
++				continue;
++
++ /* Right kind of block */
++ dev->checkpt_next_block = tags.obj_id;
++ dev->checkpt_cur_block = i;
++ dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
++ dev->blocks_in_checkpt++;
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "found checkpt block %d", i);
++ return;
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
++
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
++}
++
++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
++{
++ int i;
++
++ dev->checkpt_open_write = writing;
++
++ /* Got the functions we need? */
++ if (!dev->tagger.write_chunk_tags_fn ||
++ !dev->tagger.read_chunk_tags_fn ||
++ !dev->drv.drv_erase_fn ||
++ !dev->drv.drv_mark_bad_fn)
++ return 0;
++
++ if (writing && !yaffs2_checkpt_space_ok(dev))
++ return 0;
++
++ if (!dev->checkpt_buffer)
++ dev->checkpt_buffer =
++ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++ if (!dev->checkpt_buffer)
++ return 0;
++
++ dev->checkpt_page_seq = 0;
++ dev->checkpt_byte_count = 0;
++ dev->checkpt_sum = 0;
++ dev->checkpt_xor = 0;
++ dev->checkpt_cur_block = -1;
++ dev->checkpt_cur_chunk = -1;
++ dev->checkpt_next_block = dev->internal_start_block;
++
++ if (writing) {
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++ yaffs2_checkpt_init_chunk_hdr(dev);
++ return yaffs_checkpt_erase(dev);
++ }
++
++ /* Opening for a read */
++ /* Set to a value that will kick off a read */
++ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
++	/* A checkpoint block list of 1 checkpoint block per 16 blocks is
++	 * (hopefully) going to be way more than we need */
++ dev->blocks_in_checkpt = 0;
++ dev->checkpt_max_blocks =
++ (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
++ dev->checkpt_block_list =
++ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
++
++ if (!dev->checkpt_block_list)
++ return 0;
++
++ for (i = 0; i < dev->checkpt_max_blocks; i++)
++ dev->checkpt_block_list[i] = -1;
++
++ return 1;
++}
++
++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum)
++{
++ u32 composite_sum;
++
++ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
++ *sum = composite_sum;
++ return 1;
++}
++
++static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
++{
++ int chunk;
++ int offset_chunk;
++ struct yaffs_ext_tags tags;
++
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_erased_block(dev);
++ dev->checkpt_cur_chunk = 0;
++ }
++
++ if (dev->checkpt_cur_block < 0)
++ return 0;
++
++ tags.is_deleted = 0;
++ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
++ tags.chunk_id = dev->checkpt_page_seq + 1;
++ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
++ tags.n_bytes = dev->data_bytes_per_chunk;
++ if (dev->checkpt_cur_chunk == 0) {
++ /* First chunk we write for the block? Set block state to
++ checkpoint */
++ struct yaffs_block_info *bi =
++ yaffs_get_block_info(dev, dev->checkpt_cur_block);
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ dev->blocks_in_checkpt++;
++ }
++
++ chunk =
++ dev->checkpt_cur_block * dev->param.chunks_per_block +
++ dev->checkpt_cur_chunk;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++		"checkpoint write buffer nand %d(%d:%d) objid %d chId %d",
++ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
++ tags.obj_id, tags.chunk_id);
++
++ offset_chunk = apply_chunk_offset(dev, chunk);
++
++ dev->n_page_writes++;
++
++ dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
++ dev->checkpt_buffer, &tags);
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
++ dev->checkpt_cur_chunk = 0;
++ dev->checkpt_cur_block = -1;
++ }
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++
++ yaffs2_checkpt_init_chunk_hdr(dev);
++
++
++ return 1;
++}
++
++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
++{
++ int i = 0;
++ int ok = 1;
++ u8 *data_bytes = (u8 *) data;
++
++ if (!dev->checkpt_buffer)
++ return 0;
++
++ if (!dev->checkpt_open_write)
++ return -1;
++
++ while (i < n_bytes && ok) {
++ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
++ dev->checkpt_sum += *data_bytes;
++ dev->checkpt_xor ^= *data_bytes;
++
++ dev->checkpt_byte_offs++;
++ i++;
++ data_bytes++;
++ dev->checkpt_byte_count++;
++
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
++ ok = yaffs2_checkpt_flush_buffer(dev);
++ }
++
++ return i;
++}
++
++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
++{
++ int i = 0;
++ int ok = 1;
++ struct yaffs_ext_tags tags;
++ int chunk;
++ int offset_chunk;
++ u8 *data_bytes = (u8 *) data;
++
++ if (!dev->checkpt_buffer)
++ return 0;
++
++ if (dev->checkpt_open_write)
++ return -1;
++
++ while (i < n_bytes && ok) {
++
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
++
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_block(dev);
++ dev->checkpt_cur_chunk = 0;
++ }
++
++ if (dev->checkpt_cur_block < 0) {
++ ok = 0;
++ break;
++ }
++
++ chunk = dev->checkpt_cur_block *
++ dev->param.chunks_per_block +
++ dev->checkpt_cur_chunk;
++
++ offset_chunk = apply_chunk_offset(dev, chunk);
++ dev->n_page_reads++;
++
++ /* read in the next chunk */
++ dev->tagger.read_chunk_tags_fn(dev,
++ offset_chunk,
++ dev->checkpt_buffer,
++ &tags);
++
++ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
++ ok = 0;
++ break;
++ }
++ if(!yaffs2_checkpt_check_chunk_hdr(dev)) {
++ ok = 0;
++ break;
++ }
++
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
++
++ if (dev->checkpt_cur_chunk >=
++ dev->param.chunks_per_block)
++ dev->checkpt_cur_block = -1;
++
++ }
++
++ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
++ dev->checkpt_sum += *data_bytes;
++ dev->checkpt_xor ^= *data_bytes;
++ dev->checkpt_byte_offs++;
++ i++;
++ data_bytes++;
++ dev->checkpt_byte_count++;
++ }
++
++ return i;
++}
++
++int yaffs_checkpt_close(struct yaffs_dev *dev)
++{
++ int i;
++
++ if (dev->checkpt_open_write) {
++		if (dev->checkpt_byte_offs !=
++			sizeof(struct yaffs_checkpt_chunk_hdr))
++ yaffs2_checkpt_flush_buffer(dev);
++ } else if (dev->checkpt_block_list) {
++ for (i = 0;
++ i < dev->blocks_in_checkpt &&
++ dev->checkpt_block_list[i] >= 0; i++) {
++ int blk = dev->checkpt_block_list[i];
++ struct yaffs_block_info *bi = NULL;
++
++ if (dev->internal_start_block <= blk &&
++ blk <= dev->internal_end_block)
++ bi = yaffs_get_block_info(dev, blk);
++ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ }
++ kfree(dev->checkpt_block_list);
++ dev->checkpt_block_list = NULL;
++ }
++
++ dev->n_free_chunks -=
++ dev->blocks_in_checkpt * dev->param.chunks_per_block;
++ dev->n_erased_blocks -= dev->blocks_in_checkpt;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
++ dev->checkpt_byte_count);
++
++ if (dev->checkpt_buffer) {
++ /* free the buffer */
++ kfree(dev->checkpt_buffer);
++ dev->checkpt_buffer = NULL;
++ return 1;
++ } else {
++ return 0;
++ }
++}
++
++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
++{
++ /* Erase the checkpoint data */
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checkpoint invalidate of %d blocks",
++ dev->blocks_in_checkpt);
++
++ return yaffs_checkpt_erase(dev);
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_checkptrw.h linux-3.15-rc5/fs/yaffs2/yaffs_checkptrw.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_checkptrw.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_checkptrw.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CHECKPTRW_H__
++#define __YAFFS_CHECKPTRW_H__
++
++#include "yaffs_guts.h"
++
++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
++
++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
++
++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
++
++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum);
++
++int yaffs_checkpt_close(struct yaffs_dev *dev);
++
++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
++
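++/*
++ * Typical calling sequence (an illustrative sketch, not extra API):
++ *
++ *	if (yaffs2_checkpt_open(dev, 1)) {	/- 1 = open for writing -/
++ *		yaffs2_checkpt_wr(dev, &cp_data, sizeof(cp_data));
++ *		yaffs_checkpt_close(dev);
++ *	}
++ *
++ * open and close return 1 on success and 0 on failure; wr/rd return the
++ * number of bytes transferred. "cp_data" here stands for whatever
++ * checkpoint payload the caller serialises.
++ */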
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_ecc.c linux-3.15-rc5/fs/yaffs2/yaffs_ecc.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_ecc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_ecc.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,281 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
++ * such ECC blocks are used on a 512-byte NAND page.
++ *
++ */
++
++#include "yportenv.h"
++
++#include "yaffs_ecc.h"
++
++/* Table generated by gen-ecc.c
++ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
++ * for each byte of data. These are instead provided in a table in bits 7..2.
++ * Bit 0 of each entry indicates whether the entry has an odd or even parity,
++ * and therefore this byte's influence on the line parity.
++ */
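++/*
++ * For example, column_parity_table[0x01] == 0x55: bit 0 is set because
++ * byte 0x01 has an odd number of 1 bits, so it flips the line parity.
++ */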
++
++static const unsigned char column_parity_table[] = {
++ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++};
++
++
++/* Calculate the ECC for a 256-byte block of data */
++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
++{
++ unsigned int i;
++ unsigned char col_parity = 0;
++ unsigned char line_parity = 0;
++ unsigned char line_parity_prime = 0;
++ unsigned char t;
++ unsigned char b;
++
++ for (i = 0; i < 256; i++) {
++ b = column_parity_table[*data++];
++ col_parity ^= b;
++
++ if (b & 0x01) { /* odd number of bits in the byte */
++ line_parity ^= i;
++ line_parity_prime ^= ~i;
++ }
++ }
++
++ ecc[2] = (~col_parity) | 0x03;
++
++ t = 0;
++ if (line_parity & 0x80)
++ t |= 0x80;
++ if (line_parity_prime & 0x80)
++ t |= 0x40;
++ if (line_parity & 0x40)
++ t |= 0x20;
++ if (line_parity_prime & 0x40)
++ t |= 0x10;
++ if (line_parity & 0x20)
++ t |= 0x08;
++ if (line_parity_prime & 0x20)
++ t |= 0x04;
++ if (line_parity & 0x10)
++ t |= 0x02;
++ if (line_parity_prime & 0x10)
++ t |= 0x01;
++ ecc[1] = ~t;
++
++ t = 0;
++ if (line_parity & 0x08)
++ t |= 0x80;
++ if (line_parity_prime & 0x08)
++ t |= 0x40;
++ if (line_parity & 0x04)
++ t |= 0x20;
++ if (line_parity_prime & 0x04)
++ t |= 0x10;
++ if (line_parity & 0x02)
++ t |= 0x08;
++ if (line_parity_prime & 0x02)
++ t |= 0x04;
++ if (line_parity & 0x01)
++ t |= 0x02;
++ if (line_parity_prime & 0x01)
++ t |= 0x01;
++ ecc[0] = ~t;
++
++}
++
++/* Correct the ECC on a 256-byte block of data */
++
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
++ const unsigned char *test_ecc)
++{
++ unsigned char d0, d1, d2; /* deltas */
++
++ d0 = read_ecc[0] ^ test_ecc[0];
++ d1 = read_ecc[1] ^ test_ecc[1];
++ d2 = read_ecc[2] ^ test_ecc[2];
++
++ if ((d0 | d1 | d2) == 0)
++ return 0; /* no error */
++
++ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
++ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
++ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
++ /* Single bit (recoverable) error in data */
++
++ unsigned byte;
++ unsigned bit;
++
++ bit = byte = 0;
++
++ if (d1 & 0x80)
++ byte |= 0x80;
++ if (d1 & 0x20)
++ byte |= 0x40;
++ if (d1 & 0x08)
++ byte |= 0x20;
++ if (d1 & 0x02)
++ byte |= 0x10;
++ if (d0 & 0x80)
++ byte |= 0x08;
++ if (d0 & 0x20)
++ byte |= 0x04;
++ if (d0 & 0x08)
++ byte |= 0x02;
++ if (d0 & 0x02)
++ byte |= 0x01;
++
++ if (d2 & 0x80)
++ bit |= 0x04;
++ if (d2 & 0x20)
++ bit |= 0x02;
++ if (d2 & 0x08)
++ bit |= 0x01;
++
++ data[byte] ^= (1 << bit);
++
++ return 1; /* Corrected the error */
++ }
++
++ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
++		/* Recoverable error in ECC */
++
++ read_ecc[0] = test_ecc[0];
++ read_ecc[1] = test_ecc[1];
++ read_ecc[2] = test_ecc[2];
++
++ return 1; /* Corrected the error */
++ }
++
++ /* Unrecoverable error */
++
++ return -1;
++
++}
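++
++/*
++ * Typical round trip (an illustrative sketch; "page" is any 256-byte
++ * data buffer):
++ *
++ *	unsigned char stored_ecc[3], calc_ecc[3];
++ *
++ *	yaffs_ecc_calc(page, stored_ecc);	(on write: store with page)
++ *	...
++ *	yaffs_ecc_calc(page, calc_ecc);		(on read: recompute)
++ *	result = yaffs_ecc_correct(page, stored_ecc, calc_ecc);
++ *
++ * result is 0 for clean, 1 for a corrected single-bit error and -1 for
++ * an unrecoverable error.
++ */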
++
++/*
++ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
++ */
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *ecc_other)
++{
++ unsigned int i;
++ unsigned char col_parity = 0;
++ unsigned line_parity = 0;
++ unsigned line_parity_prime = 0;
++ unsigned char b;
++
++ for (i = 0; i < n_bytes; i++) {
++ b = column_parity_table[*data++];
++ col_parity ^= b;
++
++ if (b & 0x01) {
++ /* odd number of bits in the byte */
++ line_parity ^= i;
++ line_parity_prime ^= ~i;
++ }
++
++ }
++
++ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
++ ecc_other->line_parity = line_parity;
++ ecc_other->line_parity_prime = line_parity_prime;
++}
++
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *read_ecc,
++ const struct yaffs_ecc_other *test_ecc)
++{
++ unsigned char delta_col; /* column parity delta */
++ unsigned delta_line; /* line parity delta */
++ unsigned delta_line_prime; /* line parity delta */
++ unsigned bit;
++
++ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
++ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
++ delta_line_prime =
++ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
++
++ if ((delta_col | delta_line | delta_line_prime) == 0)
++ return 0; /* no error */
++
++ if (delta_line == ~delta_line_prime &&
++ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
++ /* Single bit (recoverable) error in data */
++
++ bit = 0;
++
++ if (delta_col & 0x20)
++ bit |= 0x04;
++ if (delta_col & 0x08)
++ bit |= 0x02;
++ if (delta_col & 0x02)
++ bit |= 0x01;
++
++ if (delta_line >= n_bytes)
++ return -1;
++
++ data[delta_line] ^= (1 << bit);
++
++ return 1; /* corrected */
++ }
++
++ if ((hweight32(delta_line) +
++ hweight32(delta_line_prime) +
++ hweight8(delta_col)) == 1) {
++		/* Recoverable error in ECC */
++
++ *read_ecc = *test_ecc;
++ return 1; /* corrected */
++ }
++
++ /* Unrecoverable error */
++
++ return -1;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_ecc.h linux-3.15-rc5/fs/yaffs2/yaffs_ecc.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_ecc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_ecc.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data.
++ * Thus, two such ECC blocks are used on a 512-byte NAND page.
++ *
++ */
++
++#ifndef __YAFFS_ECC_H__
++#define __YAFFS_ECC_H__
++
++struct yaffs_ecc_other {
++ unsigned char col_parity;
++ unsigned line_parity;
++ unsigned line_parity_prime;
++};
++
++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
++ const unsigned char *test_ecc);
++
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *ecc);
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *read_ecc,
++ const struct yaffs_ecc_other *test_ecc);
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_getblockinfo.h linux-3.15-rc5/fs/yaffs2/yaffs_getblockinfo.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_getblockinfo.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_getblockinfo.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,35 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GETBLOCKINFO_H__
++#define __YAFFS_GETBLOCKINFO_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++
++/* Function to manipulate block info */
++static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
++ *dev, int blk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>> yaffs: get_block_info block %d is not valid",
++ blk);
++ BUG();
++ }
++ return &dev->block_info[blk - dev->internal_start_block];
++}
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_guts.c linux-3.15-rc5/fs/yaffs2/yaffs_guts.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_guts.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_guts.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,5146 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++
++#include "yaffs_guts.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_tagsmarshall.h"
++#include "yaffs_nand.h"
++#include "yaffs_yaffs1.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_verify.h"
++#include "yaffs_nand.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_nameval.h"
++#include "yaffs_allocator.h"
++#include "yaffs_attribs.h"
++#include "yaffs_summary.h"
++
++/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
++#define YAFFS_GC_GOOD_ENOUGH 2
++#define YAFFS_GC_PASSIVE_THRESHOLD 4
++
++#include "yaffs_ecc.h"
++
++/* Forward declarations */
++
++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
++ const u8 *buffer, int n_bytes, int use_reserve);
++
++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
++ int buffer_size);
++
++/* Function to calculate chunk and offset */
++
++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
++ int *chunk_out, u32 *offset_out)
++{
++ int chunk;
++ u32 offset;
++
++ chunk = (u32) (addr >> dev->chunk_shift);
++
++ if (dev->chunk_div == 1) {
++ /* easy power of 2 case */
++ offset = (u32) (addr & dev->chunk_mask);
++ } else {
++ /* Non power-of-2 case */
++
++ loff_t chunk_base;
++
++ chunk /= dev->chunk_div;
++
++ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
++ offset = (u32) (addr - chunk_base);
++ }
++
++ *chunk_out = chunk;
++ *offset_out = offset;
++}
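++
++/*
++ * Example (power-of-2 chunks): with 2048-byte chunks, chunk_shift is 11
++ * and chunk_mask is 0x7ff, so addr 0x1234 maps to chunk 2, offset 0x234.
++ */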
++
++/* Function to return the number of shifts for a power of 2 greater than or
++ * equal to the given number
++ * Note we don't try to cater for all possible numbers and this does not have to
++ * be hellishly efficient.
++ */
++
++static inline u32 calc_shifts_ceiling(u32 x)
++{
++ int extra_bits;
++ int shifts;
++
++ shifts = extra_bits = 0;
++
++ while (x > 1) {
++ if (x & 1)
++ extra_bits++;
++ x >>= 1;
++ shifts++;
++ }
++
++ if (extra_bits)
++ shifts++;
++
++ return shifts;
++}
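++
++/* e.g. calc_shifts_ceiling(1000) == 10, since 2^10 = 1024 >= 1000. */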
++
++/* Function to return the number of shifts to get a 1 in bit 0
++ */
++
++static inline u32 calc_shifts(u32 x)
++{
++ u32 shifts;
++
++ shifts = 0;
++
++ if (!x)
++ return 0;
++
++ while (!(x & 1)) {
++ x >>= 1;
++ shifts++;
++ }
++
++ return shifts;
++}
++
++/*
++ * Temporary buffer manipulations.
++ */
++
++static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
++{
++ int i;
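++	/* Start with a non-NULL sentinel; buf takes each kmalloc() result,
++	 * so a failed allocation terminates the loop. */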
++ u8 *buf = (u8 *) 1;
++
++ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
++
++ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
++ dev->temp_buffer[i].in_use = 0;
++ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++ dev->temp_buffer[i].buffer = buf;
++ }
++
++ return buf ? YAFFS_OK : YAFFS_FAIL;
++}
++
++u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev)
++{
++ int i;
++
++ dev->temp_in_use++;
++ if (dev->temp_in_use > dev->max_temp)
++ dev->max_temp = dev->temp_in_use;
++
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ if (dev->temp_buffer[i].in_use == 0) {
++ dev->temp_buffer[i].in_use = 1;
++ return dev->temp_buffer[i].buffer;
++ }
++ }
++
++ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
++ /*
++ * If we got here then we have to allocate an unmanaged one
++ * This is not good.
++ */
++
++ dev->unmanaged_buffer_allocs++;
++ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
++
++}
++
++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
++{
++ int i;
++
++ dev->temp_in_use--;
++
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ if (dev->temp_buffer[i].buffer == buffer) {
++ dev->temp_buffer[i].in_use = 0;
++ return;
++ }
++ }
++
++ if (buffer) {
++ /* assume it is an unmanaged one. */
++ yaffs_trace(YAFFS_TRACE_BUFFERS,
++ "Releasing unmanaged temp buffer");
++ kfree(buffer);
++ dev->unmanaged_buffer_deallocs++;
++ }
++
++}
++
++/*
++ * Functions for robustisizing TODO
++ *
++ */
++
++static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
++ const u8 *data,
++ const struct yaffs_ext_tags *tags)
++{
++ (void) dev;
++ (void) nand_chunk;
++ (void) data;
++ (void) tags;
++}
++
++static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
++ const struct yaffs_ext_tags *tags)
++{
++ (void) dev;
++ (void) nand_chunk;
++ (void) tags;
++}
++
++void yaffs_handle_chunk_error(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi)
++{
++ if (!bi->gc_prioritise) {
++ bi->gc_prioritise = 1;
++ dev->has_pending_prioritised_gc = 1;
++ bi->chunk_error_strikes++;
++
++ if (bi->chunk_error_strikes > 3) {
++			bi->needs_retiring = 1; /* Too many strikes, so retire */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Block struck out");
++
++ }
++ }
++}
++
++static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
++ int erased_ok)
++{
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
++
++ yaffs_handle_chunk_error(dev, bi);
++
++ if (erased_ok) {
++ /* Was an actual write failure,
++ * so mark the block for retirement.*/
++ bi->needs_retiring = 1;
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>> Block %d needs retiring", flash_block);
++ }
++
++ /* Delete the chunk */
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ yaffs_skip_rest_of_block(dev);
++}
++
++/*
++ * Verification code
++ */
++
++/*
++ * Simple hash function. Needs to have a reasonable spread
++ */
++
++static inline int yaffs_hash_fn(int n)
++{
++ if (n < 0)
++ n = -n;
++ return n % YAFFS_NOBJECT_BUCKETS;
++}
++
++/*
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
++ */
++
++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
++{
++ return dev->root_dir;
++}
++
++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
++{
++ return dev->lost_n_found;
++}
++
++/*
++ * Erased NAND checking functions
++ */
++
++int yaffs_check_ff(u8 *buffer, int n_bytes)
++{
++ /* Horrible, slow implementation */
++ while (n_bytes--) {
++ if (*buffer != 0xff)
++ return 0;
++ buffer++;
++ }
++ return 1;
++}
++
++static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
++{
++ int retval = YAFFS_OK;
++ u8 *data = yaffs_get_temp_buffer(dev);
++ struct yaffs_ext_tags tags;
++ int result;
++
++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
++
++ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
++ retval = YAFFS_FAIL;
++
++ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
++ tags.chunk_used) {
++ yaffs_trace(YAFFS_TRACE_NANDACCESS,
++ "Chunk %d not erased", nand_chunk);
++ retval = YAFFS_FAIL;
++ }
++
++ yaffs_release_temp_buffer(dev, data);
++
++ return retval;
++
++}
++
++static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *data,
++ struct yaffs_ext_tags *tags)
++{
++ int retval = YAFFS_OK;
++ struct yaffs_ext_tags temp_tags;
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++ int result;
++
++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
++ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
++ temp_tags.obj_id != tags->obj_id ||
++ temp_tags.chunk_id != tags->chunk_id ||
++ temp_tags.n_bytes != tags->n_bytes)
++ retval = YAFFS_FAIL;
++
++ yaffs_release_temp_buffer(dev, buffer);
++
++ return retval;
++}
++
++
++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
++{
++ int reserved_chunks;
++ int reserved_blocks = dev->param.n_reserved_blocks;
++ int checkpt_blocks;
++
++ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
++
++ reserved_chunks =
++ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
++
++ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
++}
++
++static int yaffs_find_alloc_block(struct yaffs_dev *dev)
++{
++ int i;
++ struct yaffs_block_info *bi;
++
++ if (dev->n_erased_blocks < 1) {
++ /* Hoosterman we've got a problem.
++ * Can't get space to gc
++ */
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: no more erased blocks");
++
++ return -1;
++ }
++
++ /* Find an empty block. */
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ dev->alloc_block_finder++;
++ if (dev->alloc_block_finder < dev->internal_start_block
++ || dev->alloc_block_finder > dev->internal_end_block) {
++ dev->alloc_block_finder = dev->internal_start_block;
++ }
++
++ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->seq_number++;
++ bi->seq_number = dev->seq_number;
++ dev->n_erased_blocks--;
++ yaffs_trace(YAFFS_TRACE_ALLOCATE,
++ "Allocated block %d, seq %d, %d left" ,
++ dev->alloc_block_finder, dev->seq_number,
++ dev->n_erased_blocks);
++ return dev->alloc_block_finder;
++ }
++ }
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs tragedy: no more erased blocks, but there should have been %d",
++ dev->n_erased_blocks);
++
++ return -1;
++}
++
++static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
++ struct yaffs_block_info **block_ptr)
++{
++ int ret_val;
++ struct yaffs_block_info *bi;
++
++ if (dev->alloc_block < 0) {
++ /* Get next block to allocate off */
++ dev->alloc_block = yaffs_find_alloc_block(dev);
++ dev->alloc_page = 0;
++ }
++
++ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
++ /* No space unless we're allowed to use the reserve. */
++ return -1;
++ }
++
++ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
++ && dev->alloc_page == 0)
++ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
++
++ /* Next page please.... */
++ if (dev->alloc_block >= 0) {
++ bi = yaffs_get_block_info(dev, dev->alloc_block);
++
++ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
++ dev->alloc_page;
++ bi->pages_in_use++;
++ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
++
++ dev->alloc_page++;
++
++ dev->n_free_chunks--;
++
++ /* If the block is full set the state to full */
++ if (dev->alloc_page >= dev->param.chunks_per_block) {
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++
++ if (block_ptr)
++ *block_ptr = bi;
++
++ return ret_val;
++ }
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
++
++ return -1;
++}
++
++static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
++{
++ int n;
++
++ n = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ if (dev->alloc_block > 0)
++ n += (dev->param.chunks_per_block - dev->alloc_page);
++
++ return n;
++
++}
++
++/*
++ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
++ * if we don't want to write to it.
++ */
++void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
++{
++ struct yaffs_block_info *bi;
++
++ if (dev->alloc_block > 0) {
++ bi = yaffs_get_block_info(dev, dev->alloc_block);
++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++ }
++}
++
++static int yaffs_write_new_chunk(struct yaffs_dev *dev,
++ const u8 *data,
++ struct yaffs_ext_tags *tags, int use_reserver)
++{
++ int attempts = 0;
++ int write_ok = 0;
++ int chunk;
++
++ yaffs2_checkpt_invalidate(dev);
++
++ do {
++		struct yaffs_block_info *bi = NULL;
++ int erased_ok = 0;
++
++ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
++ if (chunk < 0) {
++ /* no space */
++ break;
++ }
++
++ /* First check this chunk is erased, if it needs
++ * checking. The checking policy (unless forced
++ * always on) is as follows:
++ *
++ * Check the first page we try to write in a block.
++ * If the check passes then we don't need to check any
++ * more. If the check fails, we check again...
++ * If the block has been erased, we don't need to check.
++ *
++ * However, if the block has been prioritised for gc,
++ * then we think there might be something odd about
++ * this block and stop using it.
++ *
++ * Rationale: We should only ever see chunks that have
++ * not been erased if there was a partially written
++ * chunk due to power loss. This checking policy should
++ * catch that case with very few checks and thus save a
++ * lot of checks that are most likely not needed.
++ *
++ * Mods to the above
++ * If an erase check fails or the write fails we skip the
++ * rest of the block.
++ */
++
++ /* let's give it a try */
++ attempts++;
++
++ if (dev->param.always_check_erased)
++ bi->skip_erased_check = 0;
++
++ if (!bi->skip_erased_check) {
++ erased_ok = yaffs_check_chunk_erased(dev, chunk);
++ if (erased_ok != YAFFS_OK) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>> yaffs chunk %d was not erased",
++ chunk);
++
++ /* If not erased, delete this one,
++ * skip rest of block and
++ * try another chunk */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++ yaffs_skip_rest_of_block(dev);
++ continue;
++ }
++ }
++
++ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
++
++ if (!bi->skip_erased_check)
++ write_ok =
++ yaffs_verify_chunk_written(dev, chunk, data, tags);
++
++ if (write_ok != YAFFS_OK) {
++ /* Clean up aborted write, skip to next block and
++ * try another chunk */
++ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
++ continue;
++ }
++
++ bi->skip_erased_check = 1;
++
++ /* Copy the data into the robustification buffer */
++ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
++
++ } while (write_ok != YAFFS_OK &&
++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++
++ if (!write_ok)
++ chunk = -1;
++
++ if (attempts > 1) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>> yaffs write required %d attempts",
++ attempts);
++ dev->n_retried_writes += (attempts - 1);
++ }
++
++ return chunk;
++}
++
++/*
++ * Block retiring for handling a broken block.
++ */
++
++static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
++{
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
++
++ yaffs2_checkpt_invalidate(dev);
++
++ yaffs2_clear_oldest_dirty_seq(dev, bi);
++
++ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
++ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Failed to mark bad and erase block %d",
++ flash_block);
++ } else {
++ struct yaffs_ext_tags tags;
++ int chunk_id =
++ flash_block * dev->param.chunks_per_block;
++
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++
++ memset(buffer, 0xff, dev->data_bytes_per_chunk);
++ memset(&tags, 0, sizeof(tags));
++ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
++ if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
++ dev->chunk_offset,
++ buffer,
++ &tags) != YAFFS_OK)
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Failed to write bad block marker to block %d",
++ flash_block);
++
++ yaffs_release_temp_buffer(dev, buffer);
++ }
++ }
++
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++ bi->gc_prioritise = 0;
++ bi->needs_retiring = 0;
++
++ dev->n_retired_blocks++;
++}
++
++/*---------------- Name handling functions ------------*/
++
++static u16 yaffs_calc_name_sum(const YCHAR *name)
++{
++ u16 sum = 0;
++ u16 i = 1;
++
++ if (!name)
++ return 0;
++
++ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
++
++ /* 0x1f mask is case insensitive */
++ sum += ((*name) & 0x1f) * i;
++ i++;
++ name++;
++ }
++ return sum;
++}
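++
++/*
++ * The 0x1f mask above makes the sum case-insensitive for ASCII letters:
++ * ('a' & 0x1f) == ('A' & 0x1f) == 1, so "foo" and "FOO" get the same sum.
++ */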
++
++
++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
++{
++ memset(obj->short_name, 0, sizeof(obj->short_name));
++
++ if (name && !name[0]) {
++ yaffs_fix_null_name(obj, obj->short_name,
++ YAFFS_SHORT_NAME_LENGTH);
++ name = obj->short_name;
++ } else if (name &&
++ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
++ YAFFS_SHORT_NAME_LENGTH) {
++ strcpy(obj->short_name, name);
++ }
++
++ obj->sum = yaffs_calc_name_sum(name);
++}
++
++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
++ const struct yaffs_obj_hdr *oh)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
++ memset(tmp_name, 0, sizeof(tmp_name));
++ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
++ YAFFS_MAX_NAME_LENGTH + 1);
++ yaffs_set_obj_name(obj, tmp_name);
++#else
++ yaffs_set_obj_name(obj, oh->name);
++#endif
++}
++
++loff_t yaffs_max_file_size(struct yaffs_dev *dev)
++{
++ if(sizeof(loff_t) < 8)
++ return YAFFS_MAX_FILE_SIZE_32;
++ else
++ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
++}
++
++/*-------------------- TNODES -------------------
++
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
++
++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
++{
++ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
++
++ if (tn) {
++ memset(tn, 0, dev->tnode_size);
++ dev->n_tnodes++;
++ }
++
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++
++ return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
++{
++ yaffs_free_raw_tnode(dev, tn);
++ dev->n_tnodes--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++}
++
++static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
++{
++ yaffs_deinit_raw_tnodes_and_objs(dev);
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
++}
++
++static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++ unsigned pos, unsigned val)
++{
++ u32 *map = (u32 *) tn;
++ u32 bit_in_map;
++ u32 bit_in_word;
++ u32 word_in_map;
++ u32 mask;
++
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++ val >>= dev->chunk_grp_bits;
++
++ bit_in_map = pos * dev->tnode_width;
++ word_in_map = bit_in_map / 32;
++ bit_in_word = bit_in_map & (32 - 1);
++
++ mask = dev->tnode_mask << bit_in_word;
++
++ map[word_in_map] &= ~mask;
++ map[word_in_map] |= (mask & (val << bit_in_word));
++
++ if (dev->tnode_width > (32 - bit_in_word)) {
++ bit_in_word = (32 - bit_in_word);
++ word_in_map++;
++ mask =
++ dev->tnode_mask >> bit_in_word;
++ map[word_in_map] &= ~mask;
++ map[word_in_map] |= (mask & (val >> bit_in_word));
++ }
++}
++
++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++ unsigned pos)
++{
++ u32 *map = (u32 *) tn;
++ u32 bit_in_map;
++ u32 bit_in_word;
++ u32 word_in_map;
++ u32 val;
++
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++
++ bit_in_map = pos * dev->tnode_width;
++ word_in_map = bit_in_map / 32;
++ bit_in_word = bit_in_map & (32 - 1);
++
++ val = map[word_in_map] >> bit_in_word;
++
++ if (dev->tnode_width > (32 - bit_in_word)) {
++ bit_in_word = (32 - bit_in_word);
++ word_in_map++;
++ val |= (map[word_in_map] << bit_in_word);
++ }
++
++ val &= dev->tnode_mask;
++ val <<= dev->chunk_grp_bits;
++
++ return val;
++}
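++
++/*
++ * Level-0 tnodes pack tnode_width-bit entries over an array of u32 words,
++ * so an entry can straddle a word boundary; the
++ * "tnode_width > (32 - bit_in_word)" paths above pick up the spill in the
++ * next word. E.g. with tnode_width == 20, entry 3 starts at bit 60 and
++ * occupies bits 28..31 of word 1 plus bits 0..15 of word 2.
++ */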
++
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of top_level
++ * in the tree. 0 means only the level 0 tnode is in the tree.
++ */
++
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id)
++{
++ struct yaffs_tnode *tn = file_struct->top;
++ u32 i;
++ int required_depth;
++ int level = file_struct->top_level;
++
++ (void) dev;
++
++ /* Check sane level and chunk Id */
++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
++
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
++
++ /* First check we're tall enough (ie enough top_level) */
++
++ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ required_depth = 0;
++ while (i) {
++ i >>= YAFFS_TNODES_INTERNAL_BITS;
++ required_depth++;
++ }
++
++ if (required_depth > file_struct->top_level)
++ return NULL; /* Not tall enough, so we can't find it */
++
++ /* Traverse down to level 0 */
++ while (level > 0 && tn) {
++ tn = tn->internal[(chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (level - 1) *
++ YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK];
++ level--;
++ }
++
++ return tn;
++}
++
++/* add_find_tnode_0 finds the level 0 tnode if it exists,
++ * otherwise first expands the tree.
++ * This happens in two steps:
++ * 1. If the tree isn't tall enough, then make it taller.
++ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ * If the tn argument is NULL, then a fresh tnode will be added otherwise the
++ * specified tn will be plugged into the tree.
++ */
++
++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id,
++ struct yaffs_tnode *passed_tn)
++{
++ int required_depth;
++ int i;
++ int l;
++ struct yaffs_tnode *tn;
++ u32 x;
++
++ /* Check sane level and page Id */
++ if (file_struct->top_level < 0 ||
++ file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
++
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
++
++ /* First check we're tall enough (ie enough top_level) */
++
++ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ required_depth = 0;
++ while (x) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ required_depth++;
++ }
++
++ if (required_depth > file_struct->top_level) {
++ /* Not tall enough, gotta make the tree taller */
++ for (i = file_struct->top_level; i < required_depth; i++) {
++
++ tn = yaffs_get_tnode(dev);
++
++ if (tn) {
++ tn->internal[0] = file_struct->top;
++ file_struct->top = tn;
++ file_struct->top_level++;
++ } else {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs: no more tnodes");
++ return NULL;
++ }
++ }
++ }
++
++ /* Traverse down to level 0, adding anything we need */
++
++ l = file_struct->top_level;
++ tn = file_struct->top;
++
++ if (l > 0) {
++ while (l > 0 && tn) {
++ x = (chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK;
++
++ if ((l > 1) && !tn->internal[x]) {
++ /* Add missing non-level-zero tnode */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if (!tn->internal[x])
++ return NULL;
++ } else if (l == 1) {
++ /* Looking from level 1 at level 0 */
++ if (passed_tn) {
++ /* If we already have one, release it */
++ if (tn->internal[x])
++ yaffs_free_tnode(dev,
++ tn->internal[x]);
++ tn->internal[x] = passed_tn;
++
++ } else if (!tn->internal[x]) {
++ /* Don't have one, none passed in */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if (!tn->internal[x])
++ return NULL;
++ }
++ }
++
++ tn = tn->internal[x];
++ l--;
++ }
++ } else {
++ /* top is level 0 */
++ if (passed_tn) {
++ memcpy(tn, passed_tn,
++ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
++ yaffs_free_tnode(dev, passed_tn);
++ }
++ }
++
++ return tn;
++}
++
++static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
++ int chunk_obj)
++{
++ return (tags->chunk_id == chunk_obj &&
++ tags->obj_id == obj_id &&
++ !tags->is_deleted) ? 1 : 0;
++
++}
++
++static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
++ struct yaffs_ext_tags *tags, int obj_id,
++ int inode_chunk)
++{
++ int j;
++
++ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
++ if (yaffs_check_chunk_bit
++ (dev, the_chunk / dev->param.chunks_per_block,
++ the_chunk % dev->param.chunks_per_block)) {
++
++ if (dev->chunk_grp_size == 1)
++ return the_chunk;
++ else {
++ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
++ tags);
++ if (yaffs_tags_match(tags,
++ obj_id, inode_chunk)) {
++ /* found it; */
++ return the_chunk;
++ }
++ }
++ }
++ the_chunk++;
++ }
++ return -1;
++}
++
++int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ struct yaffs_ext_tags *tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset. */
++ struct yaffs_tnode *tn;
++ int the_chunk = -1;
++ struct yaffs_ext_tags local_tags;
++ int ret_val = -1;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &local_tags;
++ }
++
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
++
++ if (!tn)
++ return ret_val;
++
++ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
++ inode_chunk);
++ return ret_val;
++}
++
++static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
++ struct yaffs_ext_tags *tags)
++{
++	/* Get the Tnode, then get the level 0 chunk offset. */
++ struct yaffs_tnode *tn;
++ int the_chunk = -1;
++ struct yaffs_ext_tags local_tags;
++ struct yaffs_dev *dev = in->my_dev;
++ int ret_val = -1;
++
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &local_tags;
++ }
++
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
++
++ if (!tn)
++ return ret_val;
++
++ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
++ inode_chunk);
++
++ /* Delete the entry in the filestructure (if found) */
++ if (ret_val != -1)
++ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
++
++ return ret_val;
++}
++
++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ int nand_chunk, int in_scan)
++{
++ /* NB in_scan is zero unless scanning.
++ * For forward scanning, in_scan is > 0;
++ * for backward scanning in_scan is < 0
++ *
++ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
++ */
++
++ struct yaffs_tnode *tn;
++ struct yaffs_dev *dev = in->my_dev;
++	int existing_chunk;
++ struct yaffs_ext_tags existing_tags;
++ struct yaffs_ext_tags new_tags;
++ unsigned existing_serial, new_serial;
++
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
++ /* Just ignore an attempt at putting a chunk into a non-file
++ * during scanning.
++		 * If it is not during scanning then something went wrong!
++ */
++ if (!in_scan) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++				"yaffs tragedy: attempt to put data chunk into a non-file"
++ );
++ BUG();
++ }
++
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ return YAFFS_OK;
++ }
++
++ tn = yaffs_add_find_tnode_0(dev,
++ &in->variant.file_variant,
++ inode_chunk, NULL);
++ if (!tn)
++ return YAFFS_FAIL;
++
++ if (!nand_chunk)
++ /* Dummy insert, bail now */
++ return YAFFS_OK;
++
++	existing_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++ if (in_scan != 0) {
++ /* If we're scanning then we need to test for duplicates
++ * NB This does not need to be efficient since it should only
++ * happen when the power fails during a write, then only one
++ * chunk should ever be affected.
++ *
++ * Correction for YAFFS2: This could happen quite a lot and we
++ * need to think about efficiency! TODO
++ * Update: For backward scanning we don't need to re-read tags
++ * so this is quite cheap.
++ */
++
++		if (existing_chunk > 0) {
++ /* NB Right now existing chunk will not be real
++ * chunk_id if the chunk group size > 1
++ * thus we have to do a FindChunkInFile to get the
++ * real chunk id.
++ *
++ * We have a duplicate now we need to decide which
++ * one to use:
++ *
++ * Backwards scanning YAFFS2: The old one is what
++ * we use, dump the new one.
++ * YAFFS1: Get both sets of tags and compare serial
++ * numbers.
++ */
++
++ if (in_scan > 0) {
++ /* Only do this for forward scanning */
++ yaffs_rd_chunk_tags_nand(dev,
++ nand_chunk,
++ NULL, &new_tags);
++
++ /* Do a proper find */
++				existing_chunk =
++ yaffs_find_chunk_in_file(in, inode_chunk,
++ &existing_tags);
++ }
++
++			if (existing_chunk <= 0) {
++				/* Hoosterman - how did this happen? */
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: existing chunk < 0 in scan"
++ );
++
++ }
++
++ /* NB The deleted flags should be false, otherwise
++ * the chunks will not be loaded during a scan
++ */
++
++ if (in_scan > 0) {
++ new_serial = new_tags.serial_number;
++ existing_serial = existing_tags.serial_number;
++ }
++
++ if ((in_scan > 0) &&
++			    (existing_chunk <= 0 ||
++ ((existing_serial + 1) & 3) == new_serial)) {
++ /* Forward scanning.
++ * Use new
++ * Delete the old one and drop through to
++ * update the tnode
++ */
++				yaffs_chunk_del(dev, existing_chunk, 1,
++ __LINE__);
++ } else {
++ /* Backward scanning or we want to use the
++ * existing one
++ * Delete the new one and return early so that
++ * the tnode isn't changed
++ */
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ return YAFFS_OK;
++ }
++ }
++
++ }
++
++	if (existing_chunk == 0)
++ in->n_data_chunks++;
++
++ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
++
++ return YAFFS_OK;
++}
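++
++/*
++ * Illustrative sketch (not part of the original control flow): the YAFFS1
++ * duplicate resolution above keeps the newer of two copies by comparing
++ * 2-bit serial numbers modulo 4, so the new copy wins exactly when its
++ * serial is one step ahead of the existing one. The helper below restates
++ * that test in isolation; the constant 3 mirrors the "& 3" used above.
++ */
++static inline int yaffs_serial_is_newer(unsigned existing_serial,
++					unsigned new_serial)
++{
++	/* e.g. existing 3, new 0: (3 + 1) & 3 == 0, so the new copy wins */
++	return ((existing_serial + 1) & 3) == new_serial;
++}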
++
++static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
++{
++ struct yaffs_block_info *the_block;
++ unsigned block_no;
++
++ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
++
++ block_no = chunk / dev->param.chunks_per_block;
++ the_block = yaffs_get_block_info(dev, block_no);
++ if (the_block) {
++ the_block->soft_del_pages++;
++ dev->n_free_chunks++;
++ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
++ }
++}
++
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
++ * the chunks in the file.
++ * All soft deleting does is increment the block's soft-delete count and pull
++ * the chunk out of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks
++ * are soft deleted.
++ */
++
++static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
++ u32 level, int chunk_offset)
++{
++ int i;
++ int the_chunk;
++ int all_done = 1;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (!tn)
++ return 1;
++
++ if (level > 0) {
++ for (i = YAFFS_NTNODES_INTERNAL - 1;
++ all_done && i >= 0;
++ i--) {
++ if (tn->internal[i]) {
++ all_done =
++ yaffs_soft_del_worker(in,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset <<
++ YAFFS_TNODES_INTERNAL_BITS)
++ + i);
++ if (all_done) {
++ yaffs_free_tnode(dev,
++ tn->internal[i]);
++ tn->internal[i] = NULL;
++ } else {
++ /* Can this happen? */
++ }
++ }
++ }
++ return (all_done) ? 1 : 0;
++ }
++
++ /* level 0 */
++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++ the_chunk = yaffs_get_group_base(dev, tn, i);
++ if (the_chunk) {
++ yaffs_soft_del_chunk(dev, the_chunk);
++ yaffs_load_tnode_0(dev, tn, i, 0);
++ }
++ }
++ return 1;
++}
++
++static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ struct yaffs_obj *parent;
++
++ yaffs_verify_obj_in_dir(obj);
++ parent = obj->parent;
++
++ yaffs_verify_dir(parent);
++
++ if (dev && dev->param.remove_obj_fn)
++ dev->param.remove_obj_fn(obj);
++
++ list_del_init(&obj->siblings);
++ obj->parent = NULL;
++
++ yaffs_verify_dir(parent);
++}
++
++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
++{
++ if (!directory) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: Trying to add an object to a null pointer directory"
++ );
++ BUG();
++ return;
++ }
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: Trying to add an object to a non-directory"
++ );
++ BUG();
++ }
++
++ if (obj->siblings.prev == NULL) {
++ /* Not initialised */
++ BUG();
++ }
++
++ yaffs_verify_dir(directory);
++
++ yaffs_remove_obj_from_dir(obj);
++
++ /* Now add it */
++ list_add(&obj->siblings, &directory->variant.dir_variant.children);
++ obj->parent = directory;
++
++ if (directory == obj->my_dev->unlinked_dir
++ || directory == obj->my_dev->del_dir) {
++ obj->unlinked = 1;
++ obj->my_dev->n_unlinked_files++;
++ obj->rename_allowed = 0;
++ }
++
++ yaffs_verify_dir(directory);
++ yaffs_verify_obj_in_dir(obj);
++}
++
++static int yaffs_change_obj_name(struct yaffs_obj *obj,
++ struct yaffs_obj *new_dir,
++ const YCHAR *new_name, int force, int shadows)
++{
++ int unlink_op;
++ int del_op;
++ struct yaffs_obj *existing_target;
++
++ if (new_dir == NULL)
++ new_dir = obj->parent; /* use the old directory */
++
++ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
++ );
++ BUG();
++ }
++
++ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
++ del_op = (new_dir == obj->my_dev->del_dir);
++
++ existing_target = yaffs_find_by_name(new_dir, new_name);
++
++ /* If the object is a file going into the unlinked directory,
++ * then it is OK to just stuff it in since duplicate names are OK.
++ * else only proceed if the new name does not exist and we're putting
++ * it into a directory.
++ */
++ if (!(unlink_op || del_op || force ||
++ shadows > 0 || !existing_target) ||
++ new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ return YAFFS_FAIL;
++
++ yaffs_set_obj_name(obj, new_name);
++ obj->dirty = 1;
++ yaffs_add_obj_to_dir(new_dir, obj);
++
++ if (unlink_op)
++ obj->unlinked = 1;
++
++ /* If it is a deletion then we mark it as a shrink for gc */
++ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
++ return YAFFS_OK;
++
++ return YAFFS_FAIL;
++}
++
++/*------------------------ Short Operations Cache ------------------------------
++ * In many situations where there is no high level buffering a lot of
++ * reads might be short sequential reads, and a lot of writes may be short
++ * sequential writes. eg. scanning/writing a jpeg file.
++ * In these cases, a short read/write cache can provide a huge performance
++ * benefit with dumb-as-a-rock code.
++ * In Linux, the page cache provides read buffering and the short op cache
++ * provides write buffering.
++ *
++ * There are a small number (~10) of cache chunks per device so that we don't
++ * need a very intelligent search.
++ */
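++
++/*
++ * Worked example (illustrative; assumes 2048-byte data chunks): an
++ * application writing 512 bytes at a time touches each chunk four times.
++ * With the short op cache the first write loads the chunk into a cache
++ * entry and the next three writes hit that same entry, so the chunk is
++ * programmed to NAND once instead of four times.
++ */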
++
++static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ int i;
++ struct yaffs_cache *cache;
++ int n_caches = obj->my_dev->param.n_caches;
++
++ for (i = 0; i < n_caches; i++) {
++ cache = &dev->cache[i];
++ if (cache->object == obj && cache->dirty)
++ return 1;
++ }
++
++ return 0;
++}
++
++static void yaffs_flush_file_cache(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ int lowest = -99; /* Stop compiler whining. */
++ int i;
++ struct yaffs_cache *cache;
++ int chunk_written = 0;
++ int n_caches = obj->my_dev->param.n_caches;
++
++ if (n_caches < 1)
++ return;
++ do {
++ cache = NULL;
++
++ /* Find the lowest dirty chunk for this object */
++ for (i = 0; i < n_caches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].dirty) {
++ if (!cache ||
++ dev->cache[i].chunk_id < lowest) {
++ cache = &dev->cache[i];
++ lowest = cache->chunk_id;
++ }
++ }
++ }
++
++ if (cache && !cache->locked) {
++ /* Write it out and free it up */
++ chunk_written =
++ yaffs_wr_data_obj(cache->object,
++ cache->chunk_id,
++ cache->data,
++ cache->n_bytes, 1);
++ cache->dirty = 0;
++ cache->object = NULL;
++ }
++ } while (cache && chunk_written > 0);
++
++ if (cache)
++ /* Hoosterman, disk full while writing cache out. */
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: no space during cache write");
++}
++
++/* yaffs_flush_whole_cache(dev)
++ *
++ * Flush every dirty cache entry on the device, one object at a time.
++ */
++
++void yaffs_flush_whole_cache(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ int n_caches = dev->param.n_caches;
++ int i;
++
++ /* Find a dirty object in the cache and flush it...
++ * until there are no further dirty objects.
++ */
++ do {
++ obj = NULL;
++ for (i = 0; i < n_caches && !obj; i++) {
++ if (dev->cache[i].object && dev->cache[i].dirty)
++ obj = dev->cache[i].object;
++ }
++ if (obj)
++ yaffs_flush_file_cache(obj);
++ } while (obj);
++
++}
++
++/* Grab us a cache chunk for use.
++ * First look for an empty one.
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one...., flush and look again.
++ */
++static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
++{
++ int i;
++
++ if (dev->param.n_caches > 0) {
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (!dev->cache[i].object)
++ return &dev->cache[i];
++ }
++ }
++ return NULL;
++}
++
++static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
++{
++ struct yaffs_cache *cache;
++ struct yaffs_obj *the_obj;
++ int usage;
++ int i;
++ int pushout;
++
++ if (dev->param.n_caches < 1)
++ return NULL;
++
++	/* Try to find an empty one first... */
++
++ cache = yaffs_grab_chunk_worker(dev);
++
++ if (!cache) {
++ /* They were all dirty, find the LRU object and flush
++ * its cache, then find again.
++ * NB what's here is not very accurate,
++ * we actually flush the object with the LRU chunk.
++ */
++
++ /* With locking we can't assume we can use entry zero,
++ * Set the_obj to a valid pointer for Coverity. */
++ the_obj = dev->cache[0].object;
++ usage = -1;
++ cache = NULL;
++ pushout = -1;
++
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object &&
++ !dev->cache[i].locked &&
++ (dev->cache[i].last_use < usage ||
++ !cache)) {
++ usage = dev->cache[i].last_use;
++ the_obj = dev->cache[i].object;
++ cache = &dev->cache[i];
++ pushout = i;
++ }
++ }
++
++ if (!cache || cache->dirty) {
++ /* Flush and try again */
++ yaffs_flush_file_cache(the_obj);
++ cache = yaffs_grab_chunk_worker(dev);
++ }
++ }
++ return cache;
++}
++
++/* Find a cached chunk */
++static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
++ int chunk_id)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ int i;
++
++ if (dev->param.n_caches < 1)
++ return NULL;
++
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].chunk_id == chunk_id) {
++ dev->cache_hits++;
++
++ return &dev->cache[i];
++ }
++ }
++ return NULL;
++}
++
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
++ int is_write)
++{
++ int i;
++
++ if (dev->param.n_caches < 1)
++ return;
++
++ if (dev->cache_last_use < 0 ||
++ dev->cache_last_use > 100000000) {
++ /* Reset the cache usages */
++ for (i = 1; i < dev->param.n_caches; i++)
++ dev->cache[i].last_use = 0;
++
++ dev->cache_last_use = 0;
++ }
++ dev->cache_last_use++;
++ cache->last_use = dev->cache_last_use;
++
++ if (is_write)
++ cache->dirty = 1;
++}
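++
++/*
++ * Minimal usage sketch (illustrative; assumes the device was configured
++ * with param.n_caches > 0): a short read probes the cache first and only
++ * falls back to NAND on a miss, mirroring the logic in yaffs_file_rd()
++ * further down.
++ */
++static int yaffs_cache_probe_sketch(struct yaffs_obj *obj, int chunk_id)
++{
++	struct yaffs_cache *cache = yaffs_find_chunk_cache(obj, chunk_id);
++
++	if (cache) {
++		/* Hit: bump the LRU counter; the data is in cache->data */
++		yaffs_use_cache(obj->my_dev, cache, 0);
++		return 1;
++	}
++	/* Miss: the caller would grab an entry and load it from NAND */
++	return 0;
++}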
++
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
++{
++ struct yaffs_cache *cache;
++
++ if (object->my_dev->param.n_caches > 0) {
++ cache = yaffs_find_chunk_cache(object, chunk_id);
++
++ if (cache)
++ cache->object = NULL;
++ }
++}
++
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
++{
++ int i;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (dev->param.n_caches > 0) {
++ /* Invalidate it. */
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == in)
++ dev->cache[i].object = NULL;
++ }
++ }
++}
++
++static void yaffs_unhash_obj(struct yaffs_obj *obj)
++{
++ int bucket;
++ struct yaffs_dev *dev = obj->my_dev;
++
++ /* If it is still linked into the bucket list, free from the list */
++ if (!list_empty(&obj->hash_link)) {
++ list_del_init(&obj->hash_link);
++ bucket = yaffs_hash_fn(obj->obj_id);
++ dev->obj_bucket[bucket].count--;
++ }
++}
++
++/* FreeObject frees up an Object and puts it back on the free list */
++static void yaffs_free_obj(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev;
++
++ if (!obj) {
++ BUG();
++ return;
++ }
++ dev = obj->my_dev;
++ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
++ obj, obj->my_inode);
++ if (obj->parent)
++ BUG();
++ if (!list_empty(&obj->siblings))
++ BUG();
++
++ if (obj->my_inode) {
++ /* We're still hooked up to a cached inode.
++ * Don't delete now, but mark for later deletion
++ */
++ obj->defered_free = 1;
++ return;
++ }
++
++ yaffs_unhash_obj(obj);
++
++ yaffs_free_raw_obj(dev, obj);
++ dev->n_obj--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++}
++
++void yaffs_handle_defered_free(struct yaffs_obj *obj)
++{
++ if (obj->defered_free)
++ yaffs_free_obj(obj);
++}
++
++static int yaffs_generic_obj_del(struct yaffs_obj *in)
++{
++	/* Invalidate the file's data in the cache, without flushing. */
++ yaffs_invalidate_whole_cache(in);
++
++ if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
++ /* Move to unlinked directory so we have a deletion record */
++ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
++ 0);
++ }
++
++ yaffs_remove_obj_from_dir(in);
++ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
++ in->hdr_chunk = 0;
++
++ yaffs_free_obj(in);
++ return YAFFS_OK;
++
++}
++
++static void yaffs_soft_del_file(struct yaffs_obj *obj)
++{
++ if (!obj->deleted ||
++ obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
++ obj->soft_del)
++ return;
++
++ if (obj->n_data_chunks <= 0) {
++ /* Empty file with no duplicate object headers,
++ * just delete it immediately */
++ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
++ obj->variant.file_variant.top = NULL;
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: Deleting empty file %d",
++ obj->obj_id);
++ yaffs_generic_obj_del(obj);
++ } else {
++ yaffs_soft_del_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.
++ file_variant.top_level, 0);
++ obj->soft_del = 1;
++ }
++}
++
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ *
++ * This function is recursive. For levels > 0 the function is called again on
++ * any sub-tree. For level == 0 we just check if the sub-tree has data.
++ * If there is no data in a subtree then it is pruned.
++ */
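++
++/*
++ * Capacity sketch (illustrative; assumes the usual yaffs tnode geometry,
++ * i.e. 16 level-0 entries and 8-way internal tnodes): a tree of height L
++ * addresses 16 * 8^L chunks, so pruning one internal level off an
++ * almost-empty tree shrinks the addressable range by a factor of 8.
++ */
++static inline unsigned yaffs_tree_capacity_sketch(unsigned top_level)
++{
++	/* height 0 -> 16 chunks, height 1 -> 128, height 2 -> 1024, ... */
++	return 16u << (3u * top_level);
++}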
++
++static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
++ struct yaffs_tnode *tn, u32 level,
++ int del0)
++{
++ int i;
++ int has_data;
++
++ if (!tn)
++ return tn;
++
++ has_data = 0;
++
++ if (level > 0) {
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i]) {
++ tn->internal[i] =
++ yaffs_prune_worker(dev,
++ tn->internal[i],
++ level - 1,
++ (i == 0) ? del0 : 1);
++ }
++
++ if (tn->internal[i])
++ has_data++;
++ }
++ } else {
++ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
++ u32 *map = (u32 *) tn;
++
++ for (i = 0; !has_data && i < tnode_size_u32; i++) {
++ if (map[i])
++ has_data++;
++ }
++ }
++
++ if (has_data == 0 && del0) {
++ /* Free and return NULL */
++ yaffs_free_tnode(dev, tn);
++ tn = NULL;
++ }
++ return tn;
++}
++
++static int yaffs_prune_tree(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct)
++{
++ int i;
++ int has_data;
++ int done = 0;
++ struct yaffs_tnode *tn;
++
++ if (file_struct->top_level < 1)
++ return YAFFS_OK;
++
++ file_struct->top =
++ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
++
++	/* Now we have a tree in which all the branches without data are
++	 * NULL, but the height is the same as it was.
++ * Let's see if we can trim internal tnodes to shorten the tree.
++ * We can do this if only the 0th element in the tnode is in use
++ * (ie all the non-zero are NULL)
++ */
++
++ while (file_struct->top_level && !done) {
++ tn = file_struct->top;
++
++ has_data = 0;
++ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i])
++ has_data++;
++ }
++
++ if (!has_data) {
++ file_struct->top = tn->internal[0];
++ file_struct->top_level--;
++ yaffs_free_tnode(dev, tn);
++ } else {
++ done = 1;
++ }
++ }
++
++ return YAFFS_OK;
++}
++
++/*-------------------- End of File Structure functions.-------------------*/
++
++/* alloc_empty_obj gets us a clean Object.*/
++static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
++
++ if (!obj)
++ return obj;
++
++ dev->n_obj++;
++
++ /* Now sweeten it up... */
++
++ memset(obj, 0, sizeof(struct yaffs_obj));
++ obj->being_created = 1;
++
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0;
++ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
++ INIT_LIST_HEAD(&(obj->hard_links));
++ INIT_LIST_HEAD(&(obj->hash_link));
++ INIT_LIST_HEAD(&obj->siblings);
++
++ /* Now make the directory sane */
++ if (dev->root_dir) {
++ obj->parent = dev->root_dir;
++ list_add(&(obj->siblings),
++ &dev->root_dir->variant.dir_variant.children);
++ }
++
++ /* Add it to the lost and found directory.
++ * NB Can't put root or lost-n-found in lost-n-found so
++ * check if lost-n-found exists first
++ */
++ if (dev->lost_n_found)
++ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++
++ obj->being_created = 0;
++
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++
++ return obj;
++}
++
++static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
++{
++ int i;
++ int l = 999;
++ int lowest = 999999;
++
++ /* Search for the shortest list or one that
++ * isn't too long.
++ */
++
++ for (i = 0; i < 10 && lowest > 4; i++) {
++ dev->bucket_finder++;
++ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
++ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
++ lowest = dev->obj_bucket[dev->bucket_finder].count;
++ l = dev->bucket_finder;
++ }
++ }
++
++ return l;
++}
++
++static int yaffs_new_obj_id(struct yaffs_dev *dev)
++{
++ int bucket = yaffs_find_nice_bucket(dev);
++ int found = 0;
++ struct list_head *i;
++ u32 n = (u32) bucket;
++
++ /* Now find an object value that has not already been taken
++ * by scanning the list.
++ */
++
++ while (!found) {
++ found = 1;
++ n += YAFFS_NOBJECT_BUCKETS;
++ if (1 || dev->obj_bucket[bucket].count > 0) {
++ list_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* If there is already one in the list */
++ if (i && list_entry(i, struct yaffs_obj,
++ hash_link)->obj_id == n) {
++ found = 0;
++ }
++ }
++ }
++ }
++ return n;
++}
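++
++/*
++ * Illustrative note: the search above starts at the bucket number and
++ * steps by YAFFS_NOBJECT_BUCKETS, so every candidate id hashes back into
++ * the same bucket and only that one list ever needs scanning. Assuming
++ * yaffs_hash_fn() is a simple modulo of the object id, the invariant is:
++ */
++static inline int yaffs_id_stays_in_bucket(u32 n, int bucket)
++{
++	/* e.g. bucket 7: candidates 7 + k * YAFFS_NOBJECT_BUCKETS hash to 7 */
++	return yaffs_hash_fn(n) == bucket;
++}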
++
++static void yaffs_hash_obj(struct yaffs_obj *in)
++{
++ int bucket = yaffs_hash_fn(in->obj_id);
++ struct yaffs_dev *dev = in->my_dev;
++
++ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
++ dev->obj_bucket[bucket].count++;
++}
++
++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
++{
++ int bucket = yaffs_hash_fn(number);
++ struct list_head *i;
++ struct yaffs_obj *in;
++
++ list_for_each(i, &dev->obj_bucket[bucket].list) {
++		/* Check whether it is in the list */
++ in = list_entry(i, struct yaffs_obj, hash_link);
++ if (in->obj_id == number) {
++			/* Don't return it if its free has been deferred */
++ if (in->defered_free)
++ return NULL;
++ return in;
++ }
++ }
++
++ return NULL;
++}
++
++static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
++ enum yaffs_obj_type type)
++{
++ struct yaffs_obj *the_obj = NULL;
++ struct yaffs_tnode *tn = NULL;
++
++ if (number < 0)
++ number = yaffs_new_obj_id(dev);
++
++ if (type == YAFFS_OBJECT_TYPE_FILE) {
++ tn = yaffs_get_tnode(dev);
++ if (!tn)
++ return NULL;
++ }
++
++ the_obj = yaffs_alloc_empty_obj(dev);
++ if (!the_obj) {
++ if (tn)
++ yaffs_free_tnode(dev, tn);
++ return NULL;
++ }
++
++ the_obj->fake = 0;
++ the_obj->rename_allowed = 1;
++ the_obj->unlink_allowed = 1;
++ the_obj->obj_id = number;
++ yaffs_hash_obj(the_obj);
++ the_obj->variant_type = type;
++ yaffs_load_current_time(the_obj, 1, 1);
++
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ the_obj->variant.file_variant.file_size = 0;
++ the_obj->variant.file_variant.scanned_size = 0;
++ the_obj->variant.file_variant.shrink_size =
++ yaffs_max_file_size(dev);
++ the_obj->variant.file_variant.top_level = 0;
++ the_obj->variant.file_variant.top = tn;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
++ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* No action required */
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++		/* TODO: this should not happen */
++ break;
++ }
++ return the_obj;
++}
++
++static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
++ int number, u32 mode)
++{
++
++ struct yaffs_obj *obj =
++ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++
++ if (!obj)
++ return NULL;
++
++ obj->fake = 1; /* it is fake so it might not use NAND */
++ obj->rename_allowed = 0;
++ obj->unlink_allowed = 0;
++ obj->deleted = 0;
++ obj->unlinked = 0;
++ obj->yst_mode = mode;
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0; /* Not a valid chunk. */
++ return obj;
++
++}
++
++
++static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
++{
++ int i;
++
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
++ yaffs_init_raw_tnodes_and_objs(dev);
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
++ dev->obj_bucket[i].count = 0;
++ }
++}
++
++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
++ int number,
++ enum yaffs_obj_type type)
++{
++ struct yaffs_obj *the_obj = NULL;
++
++ if (number > 0)
++ the_obj = yaffs_find_by_number(dev, number);
++
++ if (!the_obj)
++ the_obj = yaffs_new_obj(dev, number, type);
++
++ return the_obj;
++
++}
++
++YCHAR *yaffs_clone_str(const YCHAR *str)
++{
++ YCHAR *new_str = NULL;
++ int len;
++
++ if (!str)
++ str = _Y("");
++
++ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
++ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
++ if (new_str) {
++ strncpy(new_str, str, len);
++ new_str[len] = 0;
++ }
++ return new_str;
++
++}
++/*
++ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
++ * new link (ie. name) is created or deleted in the directory.
++ *
++ * ie.
++ * create dir/a : update dir's mtime/ctime
++ * rm dir/a: update dir's mtime/ctime
++ * modify dir/a: don't update dir's mtime/ctime
++ *
++ * This can be handled immediately or deferred. Deferring helps reduce the
++ * number of updates when many files in a directory are changed within a
++ * brief period.
++ *
++ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
++ * called periodically.
++ */
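++
++/*
++ * Minimal usage sketch (illustrative): with param.defered_dir_update set,
++ * a background thread would flush the accumulated directory updates in
++ * one pass, e.g.:
++ *
++ *	while (fs_is_mounted(dev)) {		(hypothetical driver loop)
++ *		sleep_a_while();		(hypothetical pacing helper)
++ *		yaffs_update_dirty_dirs(dev);
++ *	}
++ */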
++
++static void yaffs_update_parent(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev;
++
++ if (!obj)
++ return;
++ dev = obj->my_dev;
++ obj->dirty = 1;
++ yaffs_load_current_time(obj, 0, 1);
++ if (dev->param.defered_dir_update) {
++ struct list_head *link = &obj->variant.dir_variant.dirty;
++
++ if (list_empty(link)) {
++ list_add(link, &dev->dirty_dirs);
++ yaffs_trace(YAFFS_TRACE_BACKGROUND,
++ "Added object %d to dirty directories",
++ obj->obj_id);
++ }
++
++ } else {
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++ }
++}
++
++void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
++{
++ struct list_head *link;
++ struct yaffs_obj *obj;
++ struct yaffs_dir_var *d_s;
++ union yaffs_obj_var *o_v;
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
++
++ while (!list_empty(&dev->dirty_dirs)) {
++ link = dev->dirty_dirs.next;
++ list_del_init(link);
++
++ d_s = list_entry(link, struct yaffs_dir_var, dirty);
++ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
++ obj = list_entry(o_v, struct yaffs_obj, variant);
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
++ obj->obj_id);
++
++ if (obj->dirty)
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++ }
++}
++
++/*
++ * Mknod (create) a new object.
++ * equiv_obj only has meaning for a hard link;
++ * alias_str only has meaning for a symlink.
++ * rdev only has meaning for devices (a subset of special objects)
++ */
++
++static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
++ struct yaffs_obj *parent,
++ const YCHAR *name,
++ u32 mode,
++ u32 uid,
++ u32 gid,
++ struct yaffs_obj *equiv_obj,
++ const YCHAR *alias_str, u32 rdev)
++{
++ struct yaffs_obj *in;
++ YCHAR *str = NULL;
++ struct yaffs_dev *dev = parent->my_dev;
++
++ /* Check if the entry exists.
++ * If it does then fail the call since we don't want a dup. */
++ if (yaffs_find_by_name(parent, name))
++ return NULL;
++
++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ str = yaffs_clone_str(alias_str);
++ if (!str)
++ return NULL;
++ }
++
++ in = yaffs_new_obj(dev, -1, type);
++
++ if (!in) {
++ kfree(str);
++ return NULL;
++ }
++
++ in->hdr_chunk = 0;
++ in->valid = 1;
++ in->variant_type = type;
++
++ in->yst_mode = mode;
++
++ yaffs_attribs_init(in, gid, uid, rdev);
++
++ in->n_data_chunks = 0;
++
++ yaffs_set_obj_name(in, name);
++ in->dirty = 1;
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ in->my_dev = parent->my_dev;
++
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.alias = str;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ in->variant.hardlink_variant.equiv_obj = equiv_obj;
++ in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
++ list_add(&in->hard_links, &equiv_obj->hard_links);
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* do nothing */
++ break;
++ }
++
++ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
++ /* Could not create the object header, fail */
++ yaffs_del_obj(in);
++ in = NULL;
++ }
++
++ if (in)
++ yaffs_update_parent(parent);
++
++ return in;
++}
++
++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++ uid, gid, NULL, NULL, 0);
++}
++
++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
++ u32 mode, u32 uid, u32 gid)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++ mode, uid, gid, NULL, NULL, 0);
++}
++
++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, u32 rdev)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++ uid, gid, NULL, NULL, rdev);
++}
++
++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, const YCHAR *alias)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++ uid, gid, NULL, alias, 0);
++}
++
++/* yaffs_link_obj returns the object id of the equivalent object.*/
++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
++ struct yaffs_obj *equiv_obj)
++{
++ /* Get the real object in case we were fed a hard link obj */
++ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
++
++ if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
++ parent, name, 0, 0, 0,
++ equiv_obj, NULL, 0))
++ return equiv_obj;
++
++ return NULL;
++
++}
++
++
++
++/*---------------------- Block Management and Page Allocation -------------*/
++
++static void yaffs_deinit_blocks(struct yaffs_dev *dev)
++{
++ if (dev->block_info_alt && dev->block_info)
++ vfree(dev->block_info);
++ else
++ kfree(dev->block_info);
++
++ dev->block_info_alt = 0;
++
++ dev->block_info = NULL;
++
++ if (dev->chunk_bits_alt && dev->chunk_bits)
++ vfree(dev->chunk_bits);
++ else
++ kfree(dev->chunk_bits);
++ dev->chunk_bits_alt = 0;
++ dev->chunk_bits = NULL;
++}
++
++static int yaffs_init_blocks(struct yaffs_dev *dev)
++{
++ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++
++ dev->block_info = NULL;
++ dev->chunk_bits = NULL;
++ dev->alloc_block = -1; /* force it to get a new one */
++
++	/* If the first allocation strategy fails, try the alternate one */
++ dev->block_info =
++ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
++ if (!dev->block_info) {
++ dev->block_info =
++ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
++ dev->block_info_alt = 1;
++ } else {
++ dev->block_info_alt = 0;
++ }
++
++ if (!dev->block_info)
++ goto alloc_error;
++
++ /* Set up dynamic blockinfo stuff. Round up bytes. */
++ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
++ dev->chunk_bits =
++ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
++ if (!dev->chunk_bits) {
++ dev->chunk_bits =
++ vmalloc(dev->chunk_bit_stride * n_blocks);
++ dev->chunk_bits_alt = 1;
++ } else {
++ dev->chunk_bits_alt = 0;
++ }
++ if (!dev->chunk_bits)
++ goto alloc_error;
++
++
++ memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
++ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
++ return YAFFS_OK;
++
++alloc_error:
++ yaffs_deinit_blocks(dev);
++ return YAFFS_FAIL;
++}
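++
++/*
++ * The allocation pattern above - try kmalloc, fall back to vmalloc, and
++ * remember which allocator succeeded so the matching free can be used in
++ * yaffs_deinit_blocks() - restated as a standalone sketch (the names here
++ * are illustrative only):
++ */
++static void *yaffs_alloc_sketch(size_t n_bytes, int *used_vmalloc)
++{
++	void *mem = kmalloc(n_bytes, GFP_NOFS);
++
++	*used_vmalloc = 0;
++	if (!mem) {
++		mem = vmalloc(n_bytes);
++		*used_vmalloc = 1;
++	}
++	/* Free with kfree() or vfree() depending on the flag. */
++	return mem;
++}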
++
++
++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
++{
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
++ int erased_ok = 0;
++ int i;
++
++ /* If the block is still healthy erase it and mark as clean.
++ * If the block has had a data failure, then retire it.
++ */
++
++ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++ "yaffs_block_became_dirty block %d state %d %s",
++ block_no, bi->block_state,
++ (bi->needs_retiring) ? "needs retiring" : "");
++
++ yaffs2_clear_oldest_dirty_seq(dev, bi);
++
++ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
++
++ /* If this is the block being garbage collected then stop gc'ing */
++ if (block_no == dev->gc_block)
++ dev->gc_block = 0;
++
++ /* If this block is currently the best candidate for gc
++ * then drop as a candidate */
++ if (block_no == dev->gc_dirtiest) {
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ }
++
++ if (!bi->needs_retiring) {
++ yaffs2_checkpt_invalidate(dev);
++ erased_ok = yaffs_erase_block(dev, block_no);
++ if (!erased_ok) {
++ dev->n_erase_failures++;
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>> Erasure failed %d", block_no);
++ }
++ }
++
++ /* Verify erasure if needed */
++ if (erased_ok &&
++ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
++ !yaffs_skip_verification(dev))) {
++ for (i = 0; i < dev->param.chunks_per_block; i++) {
++ if (!yaffs_check_chunk_erased(dev,
++ block_no * dev->param.chunks_per_block + i)) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ ">>Block %d erasure supposedly OK, but chunk %d not erased",
++ block_no, i);
++ }
++ }
++ }
++
++ if (!erased_ok) {
++ /* We lost a block of free space */
++ dev->n_free_chunks -= dev->param.chunks_per_block;
++ yaffs_retire_block(dev, block_no);
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>> Block %d retired", block_no);
++ return;
++ }
++
++ /* Clean it up... */
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ bi->seq_number = 0;
++ dev->n_erased_blocks++;
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++ bi->has_shrink_hdr = 0;
++ bi->skip_erased_check = 1; /* Clean, so no need to check */
++ bi->gc_prioritise = 0;
++ bi->has_summary = 0;
++
++ yaffs_clear_chunk_bits(dev, block_no);
++
++ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
++}
++
++static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi,
++ int old_chunk, u8 *buffer)
++{
++ int new_chunk;
++ int mark_flash = 1;
++ struct yaffs_ext_tags tags;
++ struct yaffs_obj *object;
++ int matching_chunk;
++ int ret_val = YAFFS_OK;
++
++ memset(&tags, 0, sizeof(tags));
++ yaffs_rd_chunk_tags_nand(dev, old_chunk,
++ buffer, &tags);
++ object = yaffs_find_by_number(dev, tags.obj_id);
++
++ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
++ "Collecting chunk in block %d, %d %d %d ",
++ dev->gc_chunk, tags.obj_id,
++ tags.chunk_id, tags.n_bytes);
++
++ if (object && !yaffs_skip_verification(dev)) {
++ if (tags.chunk_id == 0)
++ matching_chunk =
++ object->hdr_chunk;
++ else if (object->soft_del)
++ /* Defeat the test */
++ matching_chunk = old_chunk;
++ else
++ matching_chunk =
++ yaffs_find_chunk_in_file
++ (object, tags.chunk_id,
++ NULL);
++
++ if (old_chunk != matching_chunk)
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "gc: page in gc mismatch: %d %d %d %d",
++ old_chunk,
++ matching_chunk,
++ tags.obj_id,
++ tags.chunk_id);
++ }
++
++ if (!object) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "page %d in gc has no object: %d %d %d ",
++ old_chunk,
++ tags.obj_id, tags.chunk_id,
++ tags.n_bytes);
++ }
++
++ if (object &&
++ object->deleted &&
++ object->soft_del && tags.chunk_id != 0) {
++		/* Data chunk in a soft deleted file:
++		 * throw it away. No need to copy it,
++		 * just forget about it and fix up
++		 * the object.
++		 */
++
++		/* The free chunk count already includes
++		 * soft deleted chunks. However, this
++		 * chunk is soon going to be really
++		 * deleted, which will increment the
++		 * free chunk count. We decrement it
++		 * here so the accounting works out.
++		 */
++ dev->n_free_chunks--;
++ bi->soft_del_pages--;
++
++ object->n_data_chunks--;
++ if (object->n_data_chunks <= 0) {
++				/* remember to clean up the object */
++ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
++ dev->n_clean_ups++;
++ }
++ mark_flash = 0;
++ } else if (object) {
++ /* It's either a data chunk in a live
++ * file or an ObjectHeader, so we're
++ * interested in it.
++ * NB Need to keep the ObjectHeaders of
++ * deleted files until the whole file
++ * has been deleted off
++ */
++ tags.serial_number++;
++ dev->n_gc_copies++;
++
++ if (tags.chunk_id == 0) {
++				/* It is an object header.
++				 * We need to nuke the
++				 * shrink-header flags since their
++				 * work is done.
++				 * Also need to clean up
++				 * shadowing.
++ */
++ struct yaffs_obj_hdr *oh;
++ oh = (struct yaffs_obj_hdr *) buffer;
++
++ oh->is_shrink = 0;
++ tags.extra_is_shrink = 0;
++ oh->shadows_obj = 0;
++ oh->inband_shadowed_obj_id = 0;
++ tags.extra_shadows = 0;
++
++ /* Update file size */
++ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++ yaffs_oh_size_load(oh,
++ object->variant.file_variant.file_size);
++ tags.extra_file_size =
++ object->variant.file_variant.file_size;
++ }
++
++ yaffs_verify_oh(object, oh, &tags, 1);
++ new_chunk =
++ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
++ } else {
++ new_chunk =
++ yaffs_write_new_chunk(dev, buffer, &tags, 1);
++ }
++
++ if (new_chunk < 0) {
++ ret_val = YAFFS_FAIL;
++ } else {
++
++ /* Now fix up the Tnodes etc. */
++
++ if (tags.chunk_id == 0) {
++ /* It's a header */
++ object->hdr_chunk = new_chunk;
++ object->serial = tags.serial_number;
++ } else {
++ /* It's a data chunk */
++ yaffs_put_chunk_in_file(object, tags.chunk_id,
++ new_chunk, 0);
++ }
++ }
++ }
++ if (ret_val == YAFFS_OK)
++ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
++ return ret_val;
++}
++
++static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
++{
++ int old_chunk;
++ int ret_val = YAFFS_OK;
++ int i;
++ int is_checkpt_block;
++ int max_copies;
++ int chunks_before = yaffs_get_erased_chunks(dev);
++ int chunks_after;
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
++
++ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
++
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "Collecting block %d, in use %d, shrink %d, whole_block %d",
++ block, bi->pages_in_use, bi->has_shrink_hdr,
++ whole_block);
++
++ /*yaffs_verify_free_chunks(dev); */
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
++ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
++
++ bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
++
++ dev->gc_disable = 1;
++
++ yaffs_summary_gc(dev, block);
++
++ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "Collecting block %d that has no chunks in use",
++ block);
++ yaffs_block_became_dirty(dev, block);
++ } else {
++
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++
++ yaffs_verify_blk(dev, bi, block);
++
++ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
++ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
++
++ for (/* init already done */ ;
++ ret_val == YAFFS_OK &&
++ dev->gc_chunk < dev->param.chunks_per_block &&
++ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
++ max_copies > 0;
++ dev->gc_chunk++, old_chunk++) {
++ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
++ /* Page is in use and might need to be copied */
++ max_copies--;
++ ret_val = yaffs_gc_process_chunk(dev, bi,
++ old_chunk, buffer);
++ }
++ }
++ yaffs_release_temp_buffer(dev, buffer);
++ }
++
++ yaffs_verify_collected_blk(dev, bi, block);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ /*
++ * The gc did not complete. Set block state back to FULL
++ * because checkpointing does not restore gc.
++ */
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ } else {
++ /* The gc completed. */
++ /* Do any required cleanups */
++ for (i = 0; i < dev->n_clean_ups; i++) {
++ /* Time to delete the file too */
++ struct yaffs_obj *object =
++ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
++ if (object) {
++ yaffs_free_tnode(dev,
++ object->variant.file_variant.top);
++ object->variant.file_variant.top = NULL;
++ yaffs_trace(YAFFS_TRACE_GC,
++ "yaffs: About to finally delete object %d",
++ object->obj_id);
++ yaffs_generic_obj_del(object);
++ object->my_dev->n_deleted_files--;
++ }
++
++ }
++ chunks_after = yaffs_get_erased_chunks(dev);
++ if (chunks_before >= chunks_after)
++ yaffs_trace(YAFFS_TRACE_GC,
++ "gc did not increase free chunks before %d after %d",
++ chunks_before, chunks_after);
++ dev->gc_block = 0;
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
++ }
++
++ dev->gc_disable = 0;
++
++ return ret_val;
++}
++
++/*
++ * find_gc_block() selects the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
++ int aggressive, int background)
++{
++ int i;
++ int iterations;
++ unsigned selected = 0;
++ int prioritised = 0;
++ int prioritised_exist = 0;
++ struct yaffs_block_info *bi;
++ int threshold;
++
++ /* First let's see if we need to grab a prioritised block */
++ if (dev->has_pending_prioritised_gc && !aggressive) {
++ dev->gc_dirtiest = 0;
++ bi = dev->block_info;
++ for (i = dev->internal_start_block;
++ i <= dev->internal_end_block && !selected; i++) {
++
++ if (bi->gc_prioritise) {
++ prioritised_exist = 1;
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ selected = i;
++ prioritised = 1;
++ }
++ }
++ bi++;
++ }
++
++ /*
++ * If there is a prioritised block and none was selected then
++ * this happened because there is at least one old dirty block
++ * gumming up the works. Let's gc the oldest dirty block.
++ */
++
++ if (prioritised_exist &&
++ !selected && dev->oldest_dirty_block > 0)
++ selected = dev->oldest_dirty_block;
++
++ if (!prioritised_exist) /* None found, so we can clear this */
++ dev->has_pending_prioritised_gc = 0;
++ }
++
++ /* If we're doing aggressive GC then we are happy to take a less-dirty
++ * block, and search harder.
++	 * else (leisurely gc) we only bother to do this if the
++ * block has only a few pages in use.
++ */
++
++ if (!selected) {
++ int pages_used;
++ int n_blocks =
++ dev->internal_end_block - dev->internal_start_block + 1;
++ if (aggressive) {
++ threshold = dev->param.chunks_per_block;
++ iterations = n_blocks;
++ } else {
++ int max_threshold;
++
++ if (background)
++ max_threshold = dev->param.chunks_per_block / 2;
++ else
++ max_threshold = dev->param.chunks_per_block / 8;
++
++ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
++ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++
++ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
++ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
++ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++ if (threshold > max_threshold)
++ threshold = max_threshold;
++
++ iterations = n_blocks / 16 + 1;
++ if (iterations > 100)
++ iterations = 100;
++ }
++
++ for (i = 0;
++ i < iterations &&
++ (dev->gc_dirtiest < 1 ||
++ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
++ i++) {
++ dev->gc_block_finder++;
++ if (dev->gc_block_finder < dev->internal_start_block ||
++ dev->gc_block_finder > dev->internal_end_block)
++ dev->gc_block_finder =
++ dev->internal_start_block;
++
++ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
++
++ pages_used = bi->pages_in_use - bi->soft_del_pages;
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ pages_used < dev->param.chunks_per_block &&
++ (dev->gc_dirtiest < 1 ||
++ pages_used < dev->gc_pages_in_use) &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ dev->gc_dirtiest = dev->gc_block_finder;
++ dev->gc_pages_in_use = pages_used;
++ }
++ }
++
++ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
++ selected = dev->gc_dirtiest;
++ }
++
++ /*
++ * If nothing has been selected for a while, try the oldest dirty
++ * because that's gumming up the works.
++ */
++
++ if (!selected && dev->param.is_yaffs2 &&
++ dev->gc_not_done >= (background ? 10 : 20)) {
++ yaffs2_find_oldest_dirty_seq(dev);
++ if (dev->oldest_dirty_block > 0) {
++ selected = dev->oldest_dirty_block;
++ dev->gc_dirtiest = selected;
++ dev->oldest_dirty_gc_count++;
++ bi = yaffs_get_block_info(dev, selected);
++ dev->gc_pages_in_use =
++ bi->pages_in_use - bi->soft_del_pages;
++ } else {
++ dev->gc_not_done = 0;
++ }
++ }
++
++ if (selected) {
++ yaffs_trace(YAFFS_TRACE_GC,
++ "GC Selected block %d with %d free, prioritised:%d",
++ selected,
++ dev->param.chunks_per_block - dev->gc_pages_in_use,
++ prioritised);
++
++ dev->n_gc_blocks++;
++ if (background)
++ dev->bg_gcs++;
++
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ dev->gc_not_done = 0;
++ if (dev->refresh_skip > 0)
++ dev->refresh_skip--;
++ } else {
++ dev->gc_not_done++;
++ yaffs_trace(YAFFS_TRACE_GC,
++ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
++ dev->gc_block_finder, dev->gc_not_done, threshold,
++ dev->gc_dirtiest, dev->gc_pages_in_use,
++ dev->oldest_dirty_block, background ? " bg" : "");
++ }
++
++ return selected;
++}
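++
++/*
++ * Worked example for the passive threshold above (illustrative; assumes
++ * 64 chunks per block and YAFFS_GC_PASSIVE_THRESHOLD == 4): a background
++ * pass uses max_threshold = 64 / 2 = 32 and threshold =
++ * (gc_not_done + 2) * 2, so after five fruitless passes the threshold has
++ * grown to (5 + 2) * 2 = 14 and blocks with more live pages become
++ * acceptable, capped at 32 pages still in use.
++ */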
++
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection,
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
++ * Passive gc only inspects smaller areas and only accepts dirtier blocks.
++ *
++ * The idea is to help clear out space in a more spread-out manner.
++ * Dunno if it really does anything useful.
++ */
++static int yaffs_check_gc(struct yaffs_dev *dev, int background)
++{
++ int aggressive = 0;
++ int gc_ok = YAFFS_OK;
++ int max_tries = 0;
++ int min_erased;
++ int erased_chunks;
++ int checkpt_block_adjust;
++
++ if (dev->param.gc_control_fn &&
++ (dev->param.gc_control_fn(dev) & 1) == 0)
++ return YAFFS_OK;
++
++ if (dev->gc_disable)
++ /* Bail out so we don't get recursive gc */
++ return YAFFS_OK;
++
++ /* This loop should pass the first time.
++ * Only loops here if the collection does not increase space.
++ */
++
++ do {
++ max_tries++;
++
++ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
++
++ min_erased =
++ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
++ erased_chunks =
++ dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ /* If we need a block soon then do aggressive gc. */
++ if (dev->n_erased_blocks < min_erased)
++ aggressive = 1;
++ else {
++ if (!background
++ && erased_chunks > (dev->n_free_chunks / 4))
++ break;
++
++ if (dev->gc_skip > 20)
++ dev->gc_skip = 20;
++ if (erased_chunks < dev->n_free_chunks / 2 ||
++ dev->gc_skip < 1 || background)
++ aggressive = 0;
++ else {
++ dev->gc_skip--;
++ break;
++ }
++ }
++
++ dev->gc_skip = 5;
++
++ /* If we don't already have a block being gc'd then see if we
++ * should start another */
++
++ if (dev->gc_block < 1 && !aggressive) {
++ dev->gc_block = yaffs2_find_refresh_block(dev);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
++ }
++ if (dev->gc_block < 1) {
++ dev->gc_block =
++ yaffs_find_gc_block(dev, aggressive, background);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
++ }
++
++ if (dev->gc_block > 0) {
++ dev->all_gcs++;
++ if (!aggressive)
++ dev->passive_gc_count++;
++
++ yaffs_trace(YAFFS_TRACE_GC,
++ "yaffs: GC n_erased_blocks %d aggressive %d",
++ dev->n_erased_blocks, aggressive);
++
++ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
++ }
++
++ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
++ dev->gc_block > 0) {
++ yaffs_trace(YAFFS_TRACE_GC,
++ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
++ dev->n_erased_blocks, max_tries,
++ dev->gc_block);
++ }
++ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
++ (dev->gc_block > 0) && (max_tries < 2));
++
++ return aggressive ? gc_ok : YAFFS_OK;
++}
++
++/*
++ * yaffs_bg_gc()
++ * Garbage collects. Intended to be called from a background thread.
++ * Returns non-zero if at least half the free chunks are erased.
++ */
++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
++{
++ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
++
++ yaffs_check_gc(dev, 1);
++ return erased_chunks > dev->n_free_chunks / 2;
++}
++
++/*-------------------- Data file manipulation -----------------*/
++
++static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
++{
++ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
++
++ if (nand_chunk >= 0)
++ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
++ buffer, NULL);
++ else {
++ yaffs_trace(YAFFS_TRACE_NANDACCESS,
++			"Chunk %d not found, zero instead",
++ nand_chunk);
++ /* get sane (zero) data if you read a hole */
++ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
++ return 0;
++ }
++
++}
++
++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
++ int lyn)
++{
++ int block;
++ int page;
++ struct yaffs_ext_tags tags;
++ struct yaffs_block_info *bi;
++
++ if (chunk_id <= 0)
++ return;
++
++ dev->n_deletions++;
++ block = chunk_id / dev->param.chunks_per_block;
++ page = chunk_id % dev->param.chunks_per_block;
++
++ if (!yaffs_check_chunk_bit(dev, block, page))
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Deleting invalid chunk %d", chunk_id);
++
++ bi = yaffs_get_block_info(dev, block);
++
++ yaffs2_update_oldest_dirty_seq(dev, block, bi);
++
++ yaffs_trace(YAFFS_TRACE_DELETION,
++ "line %d delete of chunk %d",
++ lyn, chunk_id);
++
++ if (!dev->param.is_yaffs2 && mark_flash &&
++ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
++
++ memset(&tags, 0, sizeof(tags));
++ tags.is_deleted = 1;
++ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
++ yaffs_handle_chunk_update(dev, chunk_id, &tags);
++ } else {
++ dev->n_unmarked_deletions++;
++ }
++
++ /* Pull out of the management area.
++ * If the whole block became dirty, this will kick off an erasure.
++ */
++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
++ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
++ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ dev->n_free_chunks++;
++ yaffs_clear_chunk_bit(dev, block, page);
++ bi->pages_in_use--;
++
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
++ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ yaffs_block_became_dirty(dev, block);
++ }
++ }
++}
++
++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
++ const u8 *buffer, int n_bytes, int use_reserve)
++{
++	/* Find the old chunk (needed to get the serial number),
++	 * write the new one and patch it into the tree.
++ * Invalidate old tags.
++ */
++
++ int prev_chunk_id;
++ struct yaffs_ext_tags prev_tags;
++ int new_chunk_id;
++ struct yaffs_ext_tags new_tags;
++ struct yaffs_dev *dev = in->my_dev;
++
++ yaffs_check_gc(dev, 0);
++
++ /* Get the previous chunk at this location in the file if it exists.
++ * If it does not exist then put a zero into the tree. This creates
++ * the tnode now, rather than later when it is harder to clean up.
++ */
++ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
++ if (prev_chunk_id < 1 &&
++ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
++ return 0;
++
++ /* Set up new tags */
++ memset(&new_tags, 0, sizeof(new_tags));
++
++ new_tags.chunk_id = inode_chunk;
++ new_tags.obj_id = in->obj_id;
++ new_tags.serial_number =
++ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
++ new_tags.n_bytes = n_bytes;
++
++ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Writing %d bytes to chunk!!!!!!!!!",
++ n_bytes);
++ BUG();
++ }
++
++ new_chunk_id =
++ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
++
++ if (new_chunk_id > 0) {
++ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
++
++ if (prev_chunk_id > 0)
++ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
++
++ yaffs_verify_file_sane(in);
++ }
++ return new_chunk_id;
++
++}
++
++
++
++static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
++ const YCHAR *name, const void *value, int size,
++ int flags)
++{
++ struct yaffs_xattr_mod xmod;
++ int result;
++
++ xmod.set = set;
++ xmod.name = name;
++ xmod.data = value;
++ xmod.size = size;
++ xmod.flags = flags;
++ xmod.result = -ENOSPC;
++
++ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
++
++ if (result > 0)
++ return xmod.result;
++ else
++ return -ENOSPC;
++}
++
++static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
++ struct yaffs_xattr_mod *xmod)
++{
++ int retval = 0;
++ int x_offs = sizeof(struct yaffs_obj_hdr);
++ struct yaffs_dev *dev = obj->my_dev;
++ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
++ char *x_buffer = buffer + x_offs;
++
++ if (xmod->set)
++ retval =
++ nval_set(x_buffer, x_size, xmod->name, xmod->data,
++ xmod->size, xmod->flags);
++ else
++ retval = nval_del(x_buffer, x_size, xmod->name);
++
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++ xmod->result = retval;
++
++ return retval;
++}
++
++static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
++ void *value, int size)
++{
++ char *buffer = NULL;
++ int result;
++ struct yaffs_ext_tags tags;
++ struct yaffs_dev *dev = obj->my_dev;
++ int x_offs = sizeof(struct yaffs_obj_hdr);
++ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
++ char *x_buffer;
++ int retval = 0;
++
++ if (obj->hdr_chunk < 1)
++ return -ENODATA;
++
++ /* If we know that the object has no xattribs then don't do all the
++ * reading and parsing.
++ */
++ if (obj->xattr_known && !obj->has_xattr) {
++ if (name)
++ return -ENODATA;
++ else
++ return 0;
++ }
++
++ buffer = (char *)yaffs_get_temp_buffer(dev);
++ if (!buffer)
++ return -ENOMEM;
++
++ result =
++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
++
++ if (result != YAFFS_OK)
++ retval = -ENOENT;
++ else {
++ x_buffer = buffer + x_offs;
++
++ if (!obj->xattr_known) {
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++ }
++
++ if (name)
++ retval = nval_get(x_buffer, x_size, name, value, size);
++ else
++ retval = nval_list(x_buffer, x_size, value, size);
++ }
++ yaffs_release_temp_buffer(dev, (u8 *) buffer);
++ return retval;
++}
++
++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
++ const void *value, int size, int flags)
++{
++ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
++}
++
++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
++{
++ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
++}
++
++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
++ int size)
++{
++ return yaffs_do_xattrib_fetch(obj, name, value, size);
++}
++
++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
++{
++ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
++}
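++
++/*
++ * Minimal usage sketch for the xattrib wrappers above (illustrative;
++ * the "user.tag" name and the buffer sizes are arbitrary):
++ */
++static int yaffs_xattr_usage_sketch(struct yaffs_obj *obj)
++{
++	char val[16];
++	int result;
++
++	/* Store a small attribute value, then read it back. */
++	result = yaffs_set_xattrib(obj, _Y("user.tag"), "red", 3, 0);
++	if (result < 0)
++		return result;
++	return yaffs_get_xattrib(obj, _Y("user.tag"), val, sizeof(val));
++}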
++
++static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
++{
++ u8 *buf;
++ struct yaffs_obj_hdr *oh;
++ struct yaffs_dev *dev;
++ struct yaffs_ext_tags tags;
++ int result;
++ int alloc_failed = 0;
++
++ if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
++ return;
++
++ dev = in->my_dev;
++ in->lazy_loaded = 0;
++ buf = yaffs_get_temp_buffer(dev);
++
++ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
++ oh = (struct yaffs_obj_hdr *)buf;
++
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ yaffs_set_obj_name_from_oh(in, oh);
++
++ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1; /* Not returned */
++ }
++ yaffs_release_temp_buffer(dev, buf);
++}
++
++static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
++ const YCHAR *oh_name, int buff_size)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ if (dev->param.auto_unicode) {
++ if (*oh_name) {
++ /* It is an ASCII name, do an ASCII to
++ * unicode conversion */
++ const char *ascii_oh_name = (const char *)oh_name;
++ int n = buff_size - 1;
++ while (n > 0 && *ascii_oh_name) {
++ *name = *ascii_oh_name;
++ name++;
++ ascii_oh_name++;
++ n--;
++ }
++ } else {
++ strncpy(name, oh_name + 1, buff_size - 1);
++ }
++ } else {
++#else
++ (void) dev;
++ {
++#endif
++ strncpy(name, oh_name, buff_size - 1);
++ }
++}
++
++static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
++ const YCHAR *name)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++
++ int is_ascii;
++ YCHAR *w;
++
++ if (dev->param.auto_unicode) {
++
++ is_ascii = 1;
++ w = name;
++
++ /* Figure out if the name will fit in ascii character set */
++ while (is_ascii && *w) {
++ if ((*w) & 0xff00)
++ is_ascii = 0;
++ w++;
++ }
++
++ if (is_ascii) {
++ /* It is an ASCII name, so convert unicode to ascii */
++ char *ascii_oh_name = (char *)oh_name;
++ int n = YAFFS_MAX_NAME_LENGTH - 1;
++ while (n > 0 && *name) {
++ *ascii_oh_name = *name;
++ name++;
++ ascii_oh_name++;
++ n--;
++ }
++ } else {
++ /* Unicode name, so save starting at the second YCHAR */
++ *oh_name = 0;
++ strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
++ }
++ } else {
++#else
++	(void) dev;
++ {
++#endif
++ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
++ }
++}
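++
++/*
++ * Encoding note (illustrative summary of the two functions above): under
++ * CONFIG_YAFFS_AUTO_UNICODE a name that fits in ASCII is stored as plain
++ * bytes, while a genuinely unicode name is stored with a leading 0 YCHAR
++ * and its text starting at oh_name[1]. That leading value is what lets
++ * yaffs_load_name_from_oh() tell the two encodings apart: a non-zero
++ * *oh_name means ASCII.
++ */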
++
++/* UpdateObjectHeader updates the header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
++ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
++{
++
++ struct yaffs_block_info *bi;
++ struct yaffs_dev *dev = in->my_dev;
++ int prev_chunk_id;
++ int ret_val = 0;
++ int result = 0;
++ int new_chunk_id;
++ struct yaffs_ext_tags new_tags;
++ struct yaffs_ext_tags old_tags;
++ const YCHAR *alias = NULL;
++ u8 *buffer = NULL;
++ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
++ struct yaffs_obj_hdr *oh = NULL;
++ loff_t file_size = 0;
++
++ strcpy(old_name, _Y("silly old name"));
++
++ if (in->fake && in != dev->root_dir && !force && !xmod)
++ return ret_val;
++
++ yaffs_check_gc(dev, 0);
++ yaffs_check_obj_details_loaded(in);
++
++ buffer = yaffs_get_temp_buffer(in->my_dev);
++ oh = (struct yaffs_obj_hdr *)buffer;
++
++ prev_chunk_id = in->hdr_chunk;
++
++ if (prev_chunk_id > 0) {
++ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
++ buffer, &old_tags);
++
++ yaffs_verify_oh(in, oh, &old_tags, 0);
++ memcpy(old_name, oh->name, sizeof(oh->name));
++ memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
++ } else {
++ memset(buffer, 0xff, dev->data_bytes_per_chunk);
++ }
++
++ oh->type = in->variant_type;
++ oh->yst_mode = in->yst_mode;
++ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
++
++ yaffs_load_attribs_oh(oh, in);
++
++ if (in->parent)
++ oh->parent_obj_id = in->parent->obj_id;
++ else
++ oh->parent_obj_id = 0;
++
++ if (name && *name) {
++ memset(oh->name, 0, sizeof(oh->name));
++ yaffs_load_oh_from_name(dev, oh->name, name);
++ } else if (prev_chunk_id > 0) {
++ memcpy(oh->name, old_name, sizeof(oh->name));
++ } else {
++ memset(oh->name, 0, sizeof(oh->name));
++ }
++
++ oh->is_shrink = is_shrink;
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Should not happen */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
++ oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
++ file_size = in->variant.file_variant.file_size;
++ yaffs_oh_size_load(oh, file_size);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = in->variant.symlink_variant.alias;
++ if (!alias)
++ alias = _Y("no alias");
++ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
++ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++ break;
++ }
++
++ /* process any xattrib modifications */
++ if (xmod)
++ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
++
++ /* Tags */
++ memset(&new_tags, 0, sizeof(new_tags));
++ in->serial++;
++ new_tags.chunk_id = 0;
++ new_tags.obj_id = in->obj_id;
++ new_tags.serial_number = in->serial;
++
++ /* Add extra info for file header */
++ new_tags.extra_available = 1;
++ new_tags.extra_parent_id = oh->parent_obj_id;
++ new_tags.extra_file_size = file_size;
++ new_tags.extra_is_shrink = oh->is_shrink;
++ new_tags.extra_equiv_id = oh->equiv_id;
++ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
++ new_tags.extra_obj_type = in->variant_type;
++ yaffs_verify_oh(in, oh, &new_tags, 1);
++
++ /* Create new chunk in NAND */
++ new_chunk_id =
++ yaffs_write_new_chunk(dev, buffer, &new_tags,
++ (prev_chunk_id > 0) ? 1 : 0);
++
++ if (buffer)
++ yaffs_release_temp_buffer(dev, buffer);
++
++ if (new_chunk_id < 0)
++ return new_chunk_id;
++
++ in->hdr_chunk = new_chunk_id;
++
++ if (prev_chunk_id > 0)
++ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
++
++ if (!yaffs_obj_cache_dirty(in))
++ in->dirty = 0;
++
++ /* If this was a shrink, then mark the block
++ * that the chunk lives on */
++ if (is_shrink) {
++ bi = yaffs_get_block_info(in->my_dev,
++ new_chunk_id /
++ in->my_dev->param.chunks_per_block);
++ bi->has_shrink_hdr = 1;
++ }
++
++ return new_chunk_id;
++}
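++
++/*
++ * Illustrative usage sketch (a hypothetical caller, not part of the
++ * yaffs API): rewriting an object header in place. Passing name == NULL
++ * keeps the existing name; the resize and flush paths below use exactly
++ * this form. Kept under #if 0 so it is not compiled.
++ */
++#if 0
++static int example_rewrite_header(struct yaffs_obj *obj)
++{
++	/* no rename, no force, not a shrink, no shadowing, no xattr mod */
++	return yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++}
++#endif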
++
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it:
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
++
++int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
++{
++ int chunk;
++ u32 start;
++ int n_copy;
++ int n = n_bytes;
++ int n_done = 0;
++ struct yaffs_cache *cache;
++ struct yaffs_dev *dev;
++
++ dev = in->my_dev;
++
++ while (n > 0) {
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++ chunk++;
++
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
++ if ((start + n) < dev->data_bytes_per_chunk)
++ n_copy = n;
++ else
++ n_copy = dev->data_bytes_per_chunk - start;
++
++ cache = yaffs_find_chunk_cache(in, chunk);
++
++ /* If the chunk is already in the cache or it is less than
++ * a whole chunk or we're using inband tags then use the cache
++ * (if there is caching) else bypass the cache.
++ */
++ if (cache || n_copy != dev->data_bytes_per_chunk ||
++ dev->param.inband_tags) {
++ if (dev->param.n_caches > 0) {
++
++ /* If we can't find the data in the cache,
++ * then load it up. */
++
++ if (!cache) {
++ cache =
++ yaffs_grab_chunk_cache(in->my_dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->data);
++ cache->n_bytes = 0;
++ }
++
++ yaffs_use_cache(dev, cache, 0);
++
++ cache->locked = 1;
++
++ memcpy(buffer, &cache->data[start], n_copy);
++
++ cache->locked = 0;
++ } else {
++				/* Read into the local buffer then copy... */
++
++ u8 *local_buffer =
++ yaffs_get_temp_buffer(dev);
++ yaffs_rd_data_obj(in, chunk, local_buffer);
++
++ memcpy(buffer, &local_buffer[start], n_copy);
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++ }
++ } else {
++ /* A full chunk. Read directly into the buffer. */
++ yaffs_rd_data_obj(in, chunk, buffer);
++ }
++ n -= n_copy;
++ offset += n_copy;
++ buffer += n_copy;
++ n_done += n_copy;
++ }
++ return n_done;
++}
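++
++/*
++ * Illustrative usage sketch (a hypothetical helper, not part of the
++ * yaffs API): reading a whole file into a caller-supplied buffer.
++ * yaffs_file_rd splits the request into a partial head chunk, whole
++ * chunks and a partial tail chunk as described above. Not compiled.
++ */
++#if 0
++static int example_read_whole_file(struct yaffs_obj *obj,
++				   u8 *buf, int buf_size)
++{
++	loff_t size = obj->variant.file_variant.file_size;
++	int n = (size < (loff_t) buf_size) ? (int) size : buf_size;
++
++	return yaffs_file_rd(obj, buf, 0, n);
++}
++#endif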
++
++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++ int n_bytes, int write_through)
++{
++
++ int chunk;
++ u32 start;
++ int n_copy;
++ int n = n_bytes;
++ int n_done = 0;
++ int n_writeback;
++ loff_t start_write = offset;
++ int chunk_written = 0;
++ u32 n_bytes_read;
++ loff_t chunk_start;
++ struct yaffs_dev *dev;
++
++ dev = in->my_dev;
++
++ while (n > 0 && chunk_written >= 0) {
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++
++ if (((loff_t)chunk) *
++ dev->data_bytes_per_chunk + start != offset ||
++ start >= dev->data_bytes_per_chunk) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "AddrToChunk of offset %lld gives chunk %d start %d",
++ offset, chunk, start);
++ }
++ chunk++; /* File pos to chunk in file offset */
++
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
++
++ if ((start + n) < dev->data_bytes_per_chunk) {
++ n_copy = n;
++
++ /* Now calculate how many bytes to write back....
++			 * If we're overwriting and not writing to the end of
++			 * the file then we need to write back as much as was
++			 * there before.
++ */
++
++ chunk_start = (((loff_t)(chunk - 1)) *
++ dev->data_bytes_per_chunk);
++
++ if (chunk_start > in->variant.file_variant.file_size)
++ n_bytes_read = 0; /* Past end of file */
++ else
++ n_bytes_read =
++ in->variant.file_variant.file_size -
++ chunk_start;
++
++ if (n_bytes_read > dev->data_bytes_per_chunk)
++ n_bytes_read = dev->data_bytes_per_chunk;
++
++ n_writeback =
++ (n_bytes_read >
++ (start + n)) ? n_bytes_read : (start + n);
++
++ if (n_writeback < 0 ||
++ n_writeback > dev->data_bytes_per_chunk)
++ BUG();
++
++ } else {
++ n_copy = dev->data_bytes_per_chunk - start;
++ n_writeback = dev->data_bytes_per_chunk;
++ }
++
++ if (n_copy != dev->data_bytes_per_chunk ||
++ !dev->param.cache_bypass_aligned ||
++ dev->param.inband_tags) {
++ /* An incomplete start or end chunk (or maybe both
++ * start and end chunk), or we're using inband tags,
++ * or we're forcing writes through the cache,
++ * so we want to use the cache buffers.
++ */
++ if (dev->param.n_caches > 0) {
++ struct yaffs_cache *cache;
++
++ /* If we can't find the data in the cache, then
++ * load the cache */
++ cache = yaffs_find_chunk_cache(in, chunk);
++
++ if (!cache &&
++ yaffs_check_alloc_available(dev, 1)) {
++ cache = yaffs_grab_chunk_cache(dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->data);
++ } else if (cache &&
++ !cache->dirty &&
++ !yaffs_check_alloc_available(dev,
++ 1)) {
++ /* Drop the cache if it was a read cache
++ * item and no space check has been made
++ * for it.
++ */
++ cache = NULL;
++ }
++
++ if (cache) {
++ yaffs_use_cache(dev, cache, 1);
++ cache->locked = 1;
++
++ memcpy(&cache->data[start], buffer,
++ n_copy);
++
++ cache->locked = 0;
++ cache->n_bytes = n_writeback;
++
++ if (write_through) {
++ chunk_written =
++ yaffs_wr_data_obj
++ (cache->object,
++ cache->chunk_id,
++ cache->data,
++ cache->n_bytes, 1);
++ cache->dirty = 0;
++ }
++ } else {
++ chunk_written = -1; /* fail write */
++ }
++ } else {
++ /* An incomplete start or end chunk (or maybe
++ * both start and end chunk). Read into the
++ * local buffer then copy over and write back.
++ */
++
++ u8 *local_buffer = yaffs_get_temp_buffer(dev);
++
++ yaffs_rd_data_obj(in, chunk, local_buffer);
++ memcpy(&local_buffer[start], buffer, n_copy);
++
++ chunk_written =
++ yaffs_wr_data_obj(in, chunk,
++ local_buffer,
++ n_writeback, 0);
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++ }
++ } else {
++ /* A full chunk. Write directly from the buffer. */
++
++ chunk_written =
++ yaffs_wr_data_obj(in, chunk, buffer,
++ dev->data_bytes_per_chunk, 0);
++
++ /* Since we've overwritten the cached data,
++ * we better invalidate it. */
++ yaffs_invalidate_chunk_cache(in, chunk);
++ }
++
++ if (chunk_written >= 0) {
++ n -= n_copy;
++ offset += n_copy;
++ buffer += n_copy;
++ n_done += n_copy;
++ }
++ }
++
++ /* Update file object */
++
++ if ((start_write + n_done) > in->variant.file_variant.file_size)
++ in->variant.file_variant.file_size = (start_write + n_done);
++
++ in->dirty = 1;
++ return n_done;
++}
++
++int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++ int n_bytes, int write_through)
++{
++ yaffs2_handle_hole(in, offset);
++ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
++}
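++
++/*
++ * Illustrative usage sketch (a hypothetical helper, not part of the
++ * yaffs API): appending to a file with write-through, so cached chunks
++ * are pushed straight to NAND instead of sitting dirty in the short-op
++ * cache. Not compiled.
++ */
++#if 0
++static int example_append(struct yaffs_obj *obj, const u8 *data, int n)
++{
++	loff_t end = obj->variant.file_variant.file_size;
++
++	return yaffs_wr_file(obj, data, end, n, 1);
++}
++#endif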
++
++/* ---------------------- File resizing stuff ------------------ */
++
++static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
++{
++
++ struct yaffs_dev *dev = in->my_dev;
++ loff_t old_size = in->variant.file_variant.file_size;
++ int i;
++ int chunk_id;
++ u32 dummy;
++ int last_del;
++ int start_del;
++
++ if (old_size > 0)
++ yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
++ else
++ last_del = 0;
++
++ yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
++ &start_del, &dummy);
++ last_del++;
++ start_del++;
++
++ /* Delete backwards so that we don't end up with holes if
++ * power is lost part-way through the operation.
++ */
++ for (i = last_del; i >= start_del; i--) {
++ /* NB this could be optimised somewhat,
++ * eg. could retrieve the tags and write them without
++ * using yaffs_chunk_del
++ */
++
++ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
++
++ if (chunk_id < 1)
++ continue;
++
++ if (chunk_id <
++ (dev->internal_start_block * dev->param.chunks_per_block) ||
++ chunk_id >=
++ ((dev->internal_end_block + 1) *
++ dev->param.chunks_per_block)) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Found daft chunk_id %d for %d",
++ chunk_id, i);
++ } else {
++ in->n_data_chunks--;
++ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
++ }
++ }
++}
++
++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
++{
++ int new_full;
++ u32 new_partial;
++ struct yaffs_dev *dev = obj->my_dev;
++
++ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
++
++ yaffs_prune_chunks(obj, new_size);
++
++ if (new_partial != 0) {
++ int last_chunk = 1 + new_full;
++ u8 *local_buffer = yaffs_get_temp_buffer(dev);
++
++ /* Rewrite the last chunk with its new size and zero pad */
++ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
++ memset(local_buffer + new_partial, 0,
++ dev->data_bytes_per_chunk - new_partial);
++
++ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
++ new_partial, 1);
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++ }
++
++ obj->variant.file_variant.file_size = new_size;
++
++ yaffs_prune_tree(dev, &obj->variant.file_variant);
++}
++
++int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
++{
++ struct yaffs_dev *dev = in->my_dev;
++ loff_t old_size = in->variant.file_variant.file_size;
++
++ yaffs_flush_file_cache(in);
++ yaffs_invalidate_whole_cache(in);
++
++ yaffs_check_gc(dev, 0);
++
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ if (new_size == old_size)
++ return YAFFS_OK;
++
++ if (new_size > old_size) {
++ yaffs2_handle_hole(in, new_size);
++ in->variant.file_variant.file_size = new_size;
++ } else {
++ /* new_size < old_size */
++ yaffs_resize_file_down(in, new_size);
++ }
++
++	/* Write a new object header to reflect the resize, showing that
++	 * we've shrunk the file if need be.
++	 * Do this only if the file is not in the deleted directories
++	 * and is not shadowed.
++ */
++ if (in->parent &&
++ !in->is_shadowed &&
++ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
++ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
++
++ return YAFFS_OK;
++}
++
++int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
++{
++ if (!in->dirty)
++ return YAFFS_OK;
++
++ yaffs_flush_file_cache(in);
++
++ if (data_sync)
++ return YAFFS_OK;
++
++ if (update_time)
++ yaffs_load_current_time(in, 0, 0);
++
++ return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
++ YAFFS_OK : YAFFS_FAIL;
++}
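++
++/*
++ * Illustrative usage sketch (a hypothetical truncate helper, not part
++ * of the yaffs API): resizing down prunes chunks and rewrites the
++ * header; flushing then persists the result. Not compiled.
++ */
++#if 0
++static int example_truncate(struct yaffs_obj *obj, loff_t new_size)
++{
++	if (yaffs_resize_file(obj, new_size) != YAFFS_OK)
++		return YAFFS_FAIL;
++
++	/* update_time = 1, data_sync = 0: full flush with a fresh mtime */
++	return yaffs_flush_file(obj, 1, 0);
++}
++#endif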
++
++
++/* yaffs_del_file deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
++{
++ int ret_val;
++ int del_now = 0;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (!in->my_inode)
++ del_now = 1;
++
++ if (del_now) {
++ ret_val =
++ yaffs_change_obj_name(in, in->my_dev->del_dir,
++ _Y("deleted"), 0, 0);
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: immediate deletion of file %d",
++ in->obj_id);
++ in->deleted = 1;
++ in->my_dev->n_deleted_files++;
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
++ yaffs_soft_del_file(in);
++ } else {
++ ret_val =
++ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
++ }
++ return ret_val;
++}
++
++static int yaffs_del_file(struct yaffs_obj *in)
++{
++ int ret_val = YAFFS_OK;
++ int deleted; /* Need to cache value on stack if in is freed */
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
++
++ if (in->n_data_chunks > 0) {
++ /* Use soft deletion if there is data in the file.
++ * That won't be the case if it has been resized to zero.
++ */
++ if (!in->unlinked)
++ ret_val = yaffs_unlink_file_if_needed(in);
++
++ deleted = in->deleted;
++
++ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
++ in->deleted = 1;
++ deleted = 1;
++ in->my_dev->n_deleted_files++;
++ yaffs_soft_del_file(in);
++ }
++ return deleted ? YAFFS_OK : YAFFS_FAIL;
++ } else {
++ /* The file has no data chunks so we toss it immediately */
++ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
++ in->variant.file_variant.top = NULL;
++ yaffs_generic_obj_del(in);
++
++ return YAFFS_OK;
++ }
++}
++
++int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
++{
++ return (obj &&
++ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
++ !(list_empty(&obj->variant.dir_variant.children));
++}
++
++static int yaffs_del_dir(struct yaffs_obj *obj)
++{
++ /* First check that the directory is empty. */
++ if (yaffs_is_non_empty_dir(obj))
++ return YAFFS_FAIL;
++
++ return yaffs_generic_obj_del(obj);
++}
++
++static int yaffs_del_symlink(struct yaffs_obj *in)
++{
++ kfree(in->variant.symlink_variant.alias);
++ in->variant.symlink_variant.alias = NULL;
++
++ return yaffs_generic_obj_del(in);
++}
++
++static int yaffs_del_link(struct yaffs_obj *in)
++{
++ /* remove this hardlink from the list associated with the equivalent
++ * object
++ */
++ list_del_init(&in->hard_links);
++ return yaffs_generic_obj_del(in);
++}
++
++int yaffs_del_obj(struct yaffs_obj *obj)
++{
++ int ret_val = -1;
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ ret_val = yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!list_empty(&obj->variant.dir_variant.dirty)) {
++ yaffs_trace(YAFFS_TRACE_BACKGROUND,
++ "Remove object %d from dirty directories",
++ obj->obj_id);
++ list_del_init(&obj->variant.dir_variant.dirty);
++ }
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ ret_val = yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ ret_val = yaffs_del_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ ret_val = yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ ret_val = 0;
++ break; /* should not happen. */
++ }
++ return ret_val;
++}
++
++
++static void yaffs_empty_dir_to_dir(struct yaffs_obj *from_dir,
++ struct yaffs_obj *to_dir)
++{
++ struct yaffs_obj *obj;
++ struct list_head *lh;
++ struct list_head *n;
++
++ list_for_each_safe(lh, n, &from_dir->variant.dir_variant.children) {
++ obj = list_entry(lh, struct yaffs_obj, siblings);
++ yaffs_add_obj_to_dir(to_dir, obj);
++ }
++}
++
++struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
++ enum yaffs_obj_type type)
++{
++ /* Tear down the old variant */
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ /* Nuke file data */
++ yaffs_resize_file(obj, 0);
++ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
++ obj->variant.file_variant.top = NULL;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Put the children in lost and found. */
++ yaffs_empty_dir_to_dir(obj, obj->my_dev->lost_n_found);
++ if (!list_empty(&obj->variant.dir_variant.dirty))
++ list_del_init(&obj->variant.dir_variant.dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++		/* Nuke symlink data */
++ kfree(obj->variant.symlink_variant.alias);
++ obj->variant.symlink_variant.alias = NULL;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ list_del_init(&obj->hard_links);
++ break;
++ default:
++ break;
++ }
++
++ memset(&obj->variant, 0, sizeof(obj->variant));
++
++	/* Set up new variant if the memset is not enough. */
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ INIT_LIST_HEAD(&obj->variant.dir_variant.children);
++ INIT_LIST_HEAD(&obj->variant.dir_variant.dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ default:
++ break;
++ }
++
++ obj->variant_type = type;
++
++ return obj;
++
++}
++
++static int yaffs_unlink_worker(struct yaffs_obj *obj)
++{
++ int del_now = 0;
++
++ if (!obj)
++ return YAFFS_FAIL;
++
++ if (!obj->my_inode)
++ del_now = 1;
++
++ yaffs_update_parent(obj->parent);
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ return yaffs_del_link(obj);
++ } else if (!list_empty(&obj->hard_links)) {
++ /* Curve ball: We're unlinking an object that has a hardlink.
++ *
++		 * This problem arises because we are not strictly following
++		 * the Linux link/inode model.
++ *
++ * We can't really delete the object.
++ * Instead, we do the following:
++ * - Select a hardlink.
++ * - Unhook it from the hard links
++ * - Move it from its parent directory so that the rename works.
++ * - Rename the object to the hardlink's name.
++ * - Delete the hardlink
++ */
++
++ struct yaffs_obj *hl;
++ struct yaffs_obj *parent;
++ int ret_val;
++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
++ hard_links);
++
++ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++ parent = hl->parent;
++
++ list_del_init(&hl->hard_links);
++
++ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
++
++ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
++
++ if (ret_val == YAFFS_OK)
++ ret_val = yaffs_generic_obj_del(hl);
++
++ return ret_val;
++
++ } else if (del_now) {
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ list_del_init(&obj->variant.dir_variant.dirty);
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ return yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ return yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ return YAFFS_FAIL;
++ }
++ } else if (yaffs_is_non_empty_dir(obj)) {
++ return YAFFS_FAIL;
++ } else {
++ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
++ }
++}
++
++static int yaffs_unlink_obj(struct yaffs_obj *obj)
++{
++ if (obj && obj->unlink_allowed)
++ return yaffs_unlink_worker(obj);
++
++ return YAFFS_FAIL;
++}
++
++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
++{
++ struct yaffs_obj *obj;
++
++ obj = yaffs_find_by_name(dir, name);
++ return yaffs_unlink_obj(obj);
++}
++
++/* Note:
++ * If old_name is NULL then we take old_dir as the object to be renamed.
++ */
++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
++ struct yaffs_obj *new_dir, const YCHAR *new_name)
++{
++ struct yaffs_obj *obj = NULL;
++ struct yaffs_obj *existing_target = NULL;
++ int force = 0;
++ int result;
++ struct yaffs_dev *dev;
++
++ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ dev = old_dir->my_dev;
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case insensitive systems.
++ * While look-up is case insensitive, the name isn't.
++ * Therefore we might want to change x.txt to X.txt
++ */
++ if (old_dir == new_dir &&
++ old_name && new_name &&
++ strcmp(old_name, new_name) == 0)
++ force = 1;
++#endif
++
++ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
++ YAFFS_MAX_NAME_LENGTH)
++ /* ENAMETOOLONG */
++ return YAFFS_FAIL;
++
++ if (old_name)
++ obj = yaffs_find_by_name(old_dir, old_name);
++	else {
++ obj = old_dir;
++ old_dir = obj->parent;
++ }
++
++ if (obj && obj->rename_allowed) {
++ /* Now handle an existing target, if there is one */
++ existing_target = yaffs_find_by_name(new_dir, new_name);
++ if (yaffs_is_non_empty_dir(existing_target)) {
++ return YAFFS_FAIL; /* ENOTEMPTY */
++ } else if (existing_target && existing_target != obj) {
++ /* Nuke the target first, using shadowing,
++ * but only if it isn't the same object.
++ *
++ * Note we must disable gc here otherwise it can mess
++ * up the shadowing.
++ *
++ */
++ dev->gc_disable = 1;
++ yaffs_change_obj_name(obj, new_dir, new_name, force,
++ existing_target->obj_id);
++ existing_target->is_shadowed = 1;
++ yaffs_unlink_obj(existing_target);
++ dev->gc_disable = 0;
++ }
++
++ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
++
++ yaffs_update_parent(old_dir);
++ if (new_dir != old_dir)
++ yaffs_update_parent(new_dir);
++
++ return result;
++ }
++ return YAFFS_FAIL;
++}
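++
++/*
++ * Illustrative usage sketch (a hypothetical caller, not part of the
++ * yaffs API): renaming within a single directory. If "to" already
++ * exists, the shadowing path above nukes the old target with gc
++ * disabled. Not compiled.
++ */
++#if 0
++static int example_rename(struct yaffs_obj *dir,
++			  const YCHAR *from, const YCHAR *to)
++{
++	return yaffs_rename_obj(dir, from, dir, to);
++}
++#endif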
++
++/*----------------------- Initialisation Scanning ---------------------- */
++
++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
++ int backward_scanning)
++{
++ struct yaffs_obj *obj;
++
++ if (backward_scanning) {
++ /* Handle YAFFS2 case (backward scanning)
++ * If the shadowed object exists then ignore.
++ */
++ obj = yaffs_find_by_number(dev, obj_id);
++ if (obj)
++ return;
++ }
++
++ /* Let's create it (if it does not exist) assuming it is a file so that
++ * it can do shrinking etc.
++	 * We put it in the unlinked dir so it is cleaned up after scanning.
++ */
++ obj =
++ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
++ if (!obj)
++ return;
++ obj->is_shadowed = 1;
++ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
++ obj->variant.file_variant.shrink_size = 0;
++ obj->valid = 1; /* So that we don't read any other info. */
++}
++
++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
++{
++ struct list_head *lh;
++ struct list_head *save;
++ struct yaffs_obj *hl;
++ struct yaffs_obj *in;
++
++ list_for_each_safe(lh, save, hard_list) {
++ hl = list_entry(lh, struct yaffs_obj, hard_links);
++ in = yaffs_find_by_number(dev,
++ hl->variant.hardlink_variant.equiv_id);
++
++ if (in) {
++ /* Add the hardlink pointers */
++ hl->variant.hardlink_variant.equiv_obj = in;
++ list_add(&hl->hard_links, &in->hard_links);
++ } else {
++			/* TODO: Need to report/handle this better.
++			 * Got a problem... hardlink to a non-existent object
++ */
++ hl->variant.hardlink_variant.equiv_obj = NULL;
++ INIT_LIST_HEAD(&hl->hard_links);
++ }
++ }
++}
++
++static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
++{
++ /*
++ * Sort out state of unlinked and deleted objects after scanning.
++ */
++ struct list_head *i;
++ struct list_head *n;
++ struct yaffs_obj *l;
++
++ if (dev->read_only)
++ return;
++
++ /* Soft delete all the unlinked files */
++ list_for_each_safe(i, n,
++ &dev->unlinked_dir->variant.dir_variant.children) {
++ l = list_entry(i, struct yaffs_obj, siblings);
++ yaffs_del_obj(l);
++ }
++
++ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
++ l = list_entry(i, struct yaffs_obj, siblings);
++ yaffs_del_obj(l);
++ }
++}
++
++/*
++ * This code iterates through all the objects making sure that they are rooted.
++ * Any unrooted objects are re-rooted in lost+found.
++ * An object needs to be in one of:
++ * - Directly under deleted, unlinked
++ * - Directly or indirectly under root.
++ *
++ * Note:
++ * This code assumes that we don't ever change the current relationships
++ * between directories:
++ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
++ * lost-n-found->parent == root_dir
++ *
++ * This fixes the problem where directories might have inadvertently been
++ * deleted leaving the object "hanging" without being rooted in the
++ * directory tree.
++ */
++
++static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
++{
++ return (obj == dev->del_dir ||
++ obj == dev->unlinked_dir || obj == dev->root_dir);
++}
++
++static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_obj *parent;
++ int i;
++ struct list_head *lh;
++ struct list_head *n;
++ int depth_limit;
++ int hanging;
++
++ if (dev->read_only)
++ return;
++
++ /* Iterate through the objects in each hash entry,
++ * looking at each object.
++ * Make sure it is rooted.
++ */
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
++ obj = list_entry(lh, struct yaffs_obj, hash_link);
++ parent = obj->parent;
++
++ if (yaffs_has_null_parent(dev, obj)) {
++ /* These directories are not hanging */
++ hanging = 0;
++ } else if (!parent ||
++ parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ hanging = 1;
++ } else if (yaffs_has_null_parent(dev, parent)) {
++ hanging = 0;
++ } else {
++ /*
++ * Need to follow the parent chain to
++ * see if it is hanging.
++ */
++ hanging = 0;
++ depth_limit = 100;
++
++ while (parent != dev->root_dir &&
++ parent->parent &&
++ parent->parent->variant_type ==
++ YAFFS_OBJECT_TYPE_DIRECTORY &&
++ depth_limit > 0) {
++ parent = parent->parent;
++ depth_limit--;
++ }
++ if (parent != dev->root_dir)
++ hanging = 1;
++ }
++ if (hanging) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Hanging object %d moved to lost and found",
++ obj->obj_id);
++ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++ }
++ }
++ }
++}
++
++/*
++ * Delete directory contents for cleaning up lost and found.
++ */
++static void yaffs_del_dir_contents(struct yaffs_obj *dir)
++{
++ struct yaffs_obj *obj;
++ struct list_head *lh;
++ struct list_head *n;
++
++ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ BUG();
++
++ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
++ obj = list_entry(lh, struct yaffs_obj, siblings);
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
++ yaffs_del_dir_contents(obj);
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Deleting lost_found object %d",
++ obj->obj_id);
++ yaffs_unlink_obj(obj);
++ }
++}
++
++static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
++{
++ yaffs_del_dir_contents(dev->lost_n_found);
++}
++
++
++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
++ const YCHAR *name)
++{
++ int sum;
++ struct list_head *i;
++ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
++ struct yaffs_obj *l;
++
++ if (!name)
++ return NULL;
++
++ if (!directory) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: yaffs_find_by_name: null pointer directory"
++ );
++ BUG();
++ return NULL;
++ }
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: yaffs_find_by_name: non-directory"
++ );
++ BUG();
++ }
++
++ sum = yaffs_calc_name_sum(name);
++
++ list_for_each(i, &directory->variant.dir_variant.children) {
++ l = list_entry(i, struct yaffs_obj, siblings);
++
++ if (l->parent != directory)
++ BUG();
++
++ yaffs_check_obj_details_loaded(l);
++
++ /* Special case for lost-n-found */
++ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
++ return l;
++ } else if (l->sum == sum || l->hdr_chunk <= 0) {
++			/* Possible name match, or a lost-n-found chunk
++			 * called objxxx: do a real name check.
++			 */
++ yaffs_get_obj_name(l, buffer,
++ YAFFS_MAX_NAME_LENGTH + 1);
++ if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
++ return l;
++ }
++ }
++ return NULL;
++}
++
++/* GetEquivalentObject dereferences any hard links to get to the
++ * actual object.
++ */
++
++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
++{
++ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ obj = obj->variant.hardlink_variant.equiv_obj;
++ yaffs_check_obj_details_loaded(obj);
++ }
++ return obj;
++}
++
++/*
++ * A note or two on object names.
++ * * If the object name is missing, we then make one up in the form objnnn
++ *
++ * * ASCII names are stored in the object header's name field from byte zero
++ * * Unicode names are historically stored starting from byte zero.
++ *
++ * Then there are automatic Unicode names...
++ * The purpose of these is to save names in a way that can be read as
++ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
++ * system to share files.
++ *
++ * These automatic Unicode names are stored slightly differently...
++ * - If the name can fit in the ASCII character space then they are saved as
++ * ascii names as per above.
++ * - If the name needs Unicode then the name is saved in Unicode
++ * starting at oh->name[1].
++ *
++ */
++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
++ int buffer_size)
++{
++ /* Create an object name if we could not find one. */
++ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
++ YCHAR local_name[20];
++ YCHAR num_string[20];
++ YCHAR *x = &num_string[19];
++ unsigned v = obj->obj_id;
++ num_string[19] = 0;
++ while (v > 0) {
++ x--;
++ *x = '0' + (v % 10);
++ v /= 10;
++ }
++ /* make up a name */
++ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
++ strcat(local_name, x);
++ strncpy(name, local_name, buffer_size - 1);
++ }
++}
++
++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
++{
++ memset(name, 0, buffer_size * sizeof(YCHAR));
++ yaffs_check_obj_details_loaded(obj);
++ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
++ } else if (obj->short_name[0]) {
++ strcpy(name, obj->short_name);
++ } else if (obj->hdr_chunk > 0) {
++ int result;
++ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
++
++ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
++
++ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
++
++ if (obj->hdr_chunk > 0) {
++ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
++ obj->hdr_chunk,
++ buffer, NULL);
++ }
++ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
++ buffer_size);
++
++ yaffs_release_temp_buffer(obj->my_dev, buffer);
++ }
++
++ yaffs_fix_null_name(obj, name, buffer_size);
++
++ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
++}
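++
++/*
++ * Illustrative usage sketch (a hypothetical caller, not part of the
++ * yaffs API): fetching an object's name into a stack buffer. If no
++ * name is stored, the result is the made-up "objnnn" form described
++ * above. Not compiled.
++ */
++#if 0
++static int example_name_length(struct yaffs_obj *obj)
++{
++	YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++	return yaffs_get_obj_name(obj, name, YAFFS_MAX_NAME_LENGTH + 1);
++}
++#endif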
++
++loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
++{
++ /* Dereference any hard linking */
++ obj = yaffs_get_equivalent_obj(obj);
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ return obj->variant.file_variant.file_size;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ if (!obj->variant.symlink_variant.alias)
++ return 0;
++ return strnlen(obj->variant.symlink_variant.alias,
++ YAFFS_MAX_ALIAS_LENGTH);
++ } else {
++ /* Only a directory should drop through to here */
++ return obj->my_dev->data_bytes_per_chunk;
++ }
++}
++
++int yaffs_get_obj_link_count(struct yaffs_obj *obj)
++{
++ int count = 0;
++ struct list_head *i;
++
++ if (!obj->unlinked)
++ count++; /* the object itself */
++
++ list_for_each(i, &obj->hard_links)
++ count++; /* add the hard links; */
++
++ return count;
++}
++
++int yaffs_get_obj_inode(struct yaffs_obj *obj)
++{
++ obj = yaffs_get_equivalent_obj(obj);
++
++ return obj->obj_id;
++}
++
++unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
++{
++ obj = yaffs_get_equivalent_obj(obj);
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return DT_REG;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ return DT_DIR;
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ return DT_LNK;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ return DT_REG;
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ if (S_ISFIFO(obj->yst_mode))
++ return DT_FIFO;
++ if (S_ISCHR(obj->yst_mode))
++ return DT_CHR;
++ if (S_ISBLK(obj->yst_mode))
++ return DT_BLK;
++ if (S_ISSOCK(obj->yst_mode))
++ return DT_SOCK;
++ return DT_REG;
++ break;
++ default:
++ return DT_REG;
++ break;
++ }
++}
++
++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
++{
++ obj = yaffs_get_equivalent_obj(obj);
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
++ return yaffs_clone_str(obj->variant.symlink_variant.alias);
++ else
++ return yaffs_clone_str(_Y(""));
++}
++
++/*--------------------------- Initialisation code -------------------------- */
++
++static int yaffs_check_dev_fns(struct yaffs_dev *dev)
++{
++ struct yaffs_driver *drv = &dev->drv;
++ struct yaffs_tags_handler *tagger = &dev->tagger;
++
++ /* Common functions, gotta have */
++ if (!drv->drv_read_chunk_fn ||
++ !drv->drv_write_chunk_fn ||
++ !drv->drv_erase_fn)
++ return 0;
++
++ if (dev->param.is_yaffs2 &&
++ (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn))
++ return 0;
++
++ /* Install the default tags marshalling functions if needed. */
++ yaffs_tags_compat_install(dev);
++ yaffs_tags_marshall_install(dev);
++
++ /* Check we now have the marshalling functions required. */
++ if (!tagger->write_chunk_tags_fn ||
++ !tagger->read_chunk_tags_fn ||
++ !tagger->query_block_fn ||
++ !tagger->mark_bad_fn)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs_create_initial_dir(struct yaffs_dev *dev)
++{
++ /* Initialise the unlinked, deleted, root and lost+found directories */
++ dev->lost_n_found = dev->root_dir = NULL;
++ dev->unlinked_dir = dev->del_dir = NULL;
++ dev->unlinked_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++ dev->del_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++ dev->root_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
++ YAFFS_ROOT_MODE | S_IFDIR);
++ dev->lost_n_found =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
++ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
++
++ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
++ && dev->del_dir) {
++ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
++ return YAFFS_OK;
++ }
++ return YAFFS_FAIL;
++}
++
++/* Low level init.
++ * Typically only used by yaffs_guts_initialise, but also used by the
++ * Low level yaffs driver tests.
++ */
++
++int yaffs_guts_ll_init(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()");
++
++ if (!dev) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Need a device"
++ );
++ return YAFFS_FAIL;
++ }
++
++ if (dev->ll_init)
++ return YAFFS_OK;
++
++ dev->internal_start_block = dev->param.start_block;
++ dev->internal_end_block = dev->param.end_block;
++ dev->block_offset = 0;
++ dev->chunk_offset = 0;
++ dev->n_free_chunks = 0;
++
++ dev->gc_block = 0;
++
++ if (dev->param.start_block == 0) {
++ dev->internal_start_block = dev->param.start_block + 1;
++ dev->internal_end_block = dev->param.end_block + 1;
++ dev->block_offset = 1;
++ dev->chunk_offset = dev->param.chunks_per_block;
++ }
++
++ /* Check geometry parameters. */
++
++ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
++ dev->param.total_bytes_per_chunk < 1024) ||
++ (!dev->param.is_yaffs2 &&
++ dev->param.total_bytes_per_chunk < 512) ||
++ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
++ dev->param.chunks_per_block < 2 ||
++ dev->param.n_reserved_blocks < 2 ||
++ dev->internal_start_block <= 0 ||
++ dev->internal_end_block <= 0 ||
++ dev->internal_end_block <=
++ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
++ ) {
++		/* The geometry checks failed: the device is invalid or too small */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
++ dev->param.total_bytes_per_chunk,
++ dev->param.is_yaffs2 ? "2" : "",
++ dev->param.inband_tags);
++ return YAFFS_FAIL;
++ }
++
++ /* Sort out space for inband tags, if required */
++ if (dev->param.inband_tags)
++ dev->data_bytes_per_chunk =
++ dev->param.total_bytes_per_chunk -
++ sizeof(struct yaffs_packed_tags2_tags_only);
++ else
++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
++
++ /* Got the right mix of functions? */
++ if (!yaffs_check_dev_fns(dev)) {
++ /* Function missing */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "device function(s) missing or wrong");
++
++ return YAFFS_FAIL;
++ }
++
++ if (yaffs_init_nand(dev) != YAFFS_OK) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
++ return YAFFS_FAIL;
++ }
++
++ return YAFFS_OK;
++}
++
++
++int yaffs_guts_format_dev(struct yaffs_dev *dev)
++{
++ int i;
++ enum yaffs_block_state state;
++ u32 dummy;
++
++	if (yaffs_guts_ll_init(dev) != YAFFS_OK)
++		return YAFFS_FAIL;
++
++	if (dev->is_mounted)
++		return YAFFS_FAIL;
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ yaffs_query_init_block_state(dev, i, &state, &dummy);
++ if (state != YAFFS_BLOCK_STATE_DEAD)
++ yaffs_erase_block(dev, i);
++ }
++
++ return YAFFS_OK;
++}
++
++
++int yaffs_guts_initialise(struct yaffs_dev *dev)
++{
++ int init_failed = 0;
++ unsigned x;
++ int bits;
++
++	if (yaffs_guts_ll_init(dev) != YAFFS_OK)
++ return YAFFS_FAIL;
++
++ if (dev->is_mounted) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
++ return YAFFS_FAIL;
++ }
++
++ dev->is_mounted = 1;
++
++ /* OK now calculate a few things for the device */
++
++ /*
++ * Calculate all the chunk size manipulation numbers:
++ */
++ x = dev->data_bytes_per_chunk;
++ /* We always use dev->chunk_shift and dev->chunk_div */
++ dev->chunk_shift = calc_shifts(x);
++ x >>= dev->chunk_shift;
++ dev->chunk_div = x;
++ /* We only use chunk mask if chunk_div is 1 */
++ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
++
++ /*
++ * Calculate chunk_grp_bits.
++	 * We need to find the next power of 2 greater than internal_end_block
++ */
++
++ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
++
++ bits = calc_shifts_ceiling(x);
++
++ /* Set up tnode width if wide tnodes are enabled. */
++ if (!dev->param.wide_tnodes_disabled) {
++ /* bits must be even so that we end up with 32-bit words */
++ if (bits & 1)
++ bits++;
++ if (bits < 16)
++ dev->tnode_width = 16;
++ else
++ dev->tnode_width = bits;
++ } else {
++ dev->tnode_width = 16;
++ }
++
++ dev->tnode_mask = (1 << dev->tnode_width) - 1;
++
++ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
++ * so if the bitwidth of the
++ * chunk range we're using is greater than 16 we need
++ * to figure out chunk shift and chunk_grp_size
++ */
++
++ if (bits <= dev->tnode_width)
++ dev->chunk_grp_bits = 0;
++ else
++ dev->chunk_grp_bits = bits - dev->tnode_width;
++
++ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
++ if (dev->tnode_size < sizeof(struct yaffs_tnode))
++ dev->tnode_size = sizeof(struct yaffs_tnode);
++
++ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
++
++ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
++ /* We have a problem because the soft delete won't work if
++ * the chunk group size > chunks per block.
++ * This can be remedied by using larger "virtual blocks".
++ */
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
++
++ return YAFFS_FAIL;
++ }
++
++ /* Finished verifying the device, continue with initialisation */
++
++ /* More device initialisation */
++ dev->all_gcs = 0;
++ dev->passive_gc_count = 0;
++ dev->oldest_dirty_gc_count = 0;
++ dev->bg_gcs = 0;
++ dev->gc_block_finder = 0;
++ dev->buffered_block = -1;
++ dev->doing_buffered_block_rewrite = 0;
++ dev->n_deleted_files = 0;
++ dev->n_bg_deletions = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_ecc_fixed = 0;
++ dev->n_ecc_unfixed = 0;
++ dev->n_tags_ecc_fixed = 0;
++ dev->n_tags_ecc_unfixed = 0;
++ dev->n_erase_failures = 0;
++ dev->n_erased_blocks = 0;
++ dev->gc_disable = 0;
++ dev->has_pending_prioritised_gc = 1;
++ /* Assume the worst for now, will get fixed on first GC */
++ INIT_LIST_HEAD(&dev->dirty_dirs);
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
++
++ /* Initialise temporary buffers and caches. */
++ if (!yaffs_init_tmp_buffers(dev))
++ init_failed = 1;
++
++ dev->cache = NULL;
++ dev->gc_cleanup_list = NULL;
++
++ if (!init_failed && dev->param.n_caches > 0) {
++ int i;
++ void *buf;
++ int cache_bytes =
++ dev->param.n_caches * sizeof(struct yaffs_cache);
++
++ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
++ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
++
++ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
++
++ buf = (u8 *) dev->cache;
++
++ if (dev->cache)
++ memset(dev->cache, 0, cache_bytes);
++
++ for (i = 0; i < dev->param.n_caches && buf; i++) {
++ dev->cache[i].object = NULL;
++ dev->cache[i].last_use = 0;
++ dev->cache[i].dirty = 0;
++ dev->cache[i].data = buf =
++ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++ }
++ if (!buf)
++ init_failed = 1;
++
++ dev->cache_last_use = 0;
++ }
++
++ dev->cache_hits = 0;
++
++ if (!init_failed) {
++ dev->gc_cleanup_list =
++ kmalloc(dev->param.chunks_per_block * sizeof(u32),
++ GFP_NOFS);
++ if (!dev->gc_cleanup_list)
++ init_failed = 1;
++ }
++
++ if (dev->param.is_yaffs2)
++ dev->param.use_header_file_size = 1;
++
++ if (!init_failed && !yaffs_init_blocks(dev))
++ init_failed = 1;
++
++ yaffs_init_tnodes_and_objs(dev);
++
++ if (!init_failed && !yaffs_create_initial_dir(dev))
++ init_failed = 1;
++
++ if (!init_failed && dev->param.is_yaffs2 &&
++ !dev->param.disable_summary &&
++ !yaffs_summary_init(dev))
++ init_failed = 1;
++
++ if (!init_failed) {
++ /* Now scan the flash. */
++ if (dev->param.is_yaffs2) {
++ if (yaffs2_checkpt_restore(dev)) {
++ yaffs_check_obj_details_loaded(dev->root_dir);
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT |
++ YAFFS_TRACE_MOUNT,
++ "yaffs: restored from checkpoint"
++ );
++ } else {
++
++ /* Clean up the mess caused by an aborted
++ * checkpoint load then scan backwards.
++ */
++ yaffs_deinit_blocks(dev);
++
++ yaffs_deinit_tnodes_and_objs(dev);
++
++ dev->n_erased_blocks = 0;
++ dev->n_free_chunks = 0;
++ dev->alloc_block = -1;
++ dev->alloc_page = -1;
++ dev->n_deleted_files = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_bg_deletions = 0;
++
++ if (!init_failed && !yaffs_init_blocks(dev))
++ init_failed = 1;
++
++ yaffs_init_tnodes_and_objs(dev);
++
++ if (!init_failed
++ && !yaffs_create_initial_dir(dev))
++ init_failed = 1;
++
++ if (!init_failed && !yaffs2_scan_backwards(dev))
++ init_failed = 1;
++ }
++ } else if (!yaffs1_scan(dev)) {
++ init_failed = 1;
++ }
++
++ yaffs_strip_deleted_objs(dev);
++ yaffs_fix_hanging_objs(dev);
++ if (dev->param.empty_lost_n_found)
++ yaffs_empty_l_n_f(dev);
++ }
++
++ if (init_failed) {
++ /* Clean up the mess */
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: yaffs_guts_initialise() aborted.");
++
++ yaffs_deinitialise(dev);
++ return YAFFS_FAIL;
++ }
++
++ /* Zero out stats */
++ dev->n_page_reads = 0;
++ dev->n_page_writes = 0;
++ dev->n_erasures = 0;
++ dev->n_gc_copies = 0;
++ dev->n_retried_writes = 0;
++
++ dev->n_retired_blocks = 0;
++
++ yaffs_verify_free_chunks(dev);
++ yaffs_verify_blocks(dev);
++
++ /* Clean up any aborted checkpoint data */
++ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
++ yaffs2_checkpt_invalidate(dev);
++
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: yaffs_guts_initialise() done.");
++ return YAFFS_OK;
++}
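++
++/*
++ * Illustrative usage sketch (a hypothetical bring-up sequence, assuming
++ * dev->param and the driver hooks are already filled in): formatting is
++ * only legal before the device is mounted, so it must precede the
++ * initialise call. Not compiled.
++ */
++#if 0
++static int example_mount(struct yaffs_dev *dev, int format_first)
++{
++	if (format_first && yaffs_guts_format_dev(dev) != YAFFS_OK)
++		return YAFFS_FAIL;
++
++	return yaffs_guts_initialise(dev);
++}
++#endif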
++
++void yaffs_deinitialise(struct yaffs_dev *dev)
++{
++ if (dev->is_mounted) {
++ int i;
++
++ yaffs_deinit_blocks(dev);
++ yaffs_deinit_tnodes_and_objs(dev);
++ yaffs_summary_deinit(dev);
++
++ if (dev->param.n_caches > 0 && dev->cache) {
++
++ for (i = 0; i < dev->param.n_caches; i++) {
++ kfree(dev->cache[i].data);
++ dev->cache[i].data = NULL;
++ }
++
++ kfree(dev->cache);
++ dev->cache = NULL;
++ }
++
++ kfree(dev->gc_cleanup_list);
++
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
++ kfree(dev->temp_buffer[i].buffer);
++
++ dev->is_mounted = 0;
++
++ yaffs_deinit_nand(dev);
++ }
++}
++
++int yaffs_count_free_chunks(struct yaffs_dev *dev)
++{
++ int n_free = 0;
++ int b;
++ struct yaffs_block_info *blk;
++
++ blk = dev->block_info;
++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++ switch (blk->block_state) {
++ case YAFFS_BLOCK_STATE_EMPTY:
++ case YAFFS_BLOCK_STATE_ALLOCATING:
++ case YAFFS_BLOCK_STATE_COLLECTING:
++ case YAFFS_BLOCK_STATE_FULL:
++ n_free +=
++ (dev->param.chunks_per_block - blk->pages_in_use +
++ blk->soft_del_pages);
++ break;
++ default:
++ break;
++ }
++ blk++;
++ }
++ return n_free;
++}
++
++int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
++{
++ /* This is what we report to the outside world */
++ int n_free;
++ int n_dirty_caches;
++ int blocks_for_checkpt;
++ int i;
++
++ n_free = dev->n_free_chunks;
++ n_free += dev->n_deleted_files;
++
++ /* Now count and subtract the number of dirty chunks in the cache. */
++
++ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].dirty)
++ n_dirty_caches++;
++ }
++
++ n_free -= n_dirty_caches;
++
++ n_free -=
++ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
++
++ /* Now figure checkpoint space and report that... */
++ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
++
++ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
++
++ if (n_free < 0)
++ n_free = 0;
++
++ return n_free;
++}
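++
++/*
++ * Illustrative usage sketch (a hypothetical statfs-style helper, not
++ * part of the yaffs API): converting the reported free chunk count
++ * into bytes. Not compiled.
++ */
++#if 0
++static loff_t example_free_bytes(struct yaffs_dev *dev)
++{
++	return ((loff_t) yaffs_get_n_free_chunks(dev)) *
++	       dev->data_bytes_per_chunk;
++}
++#endif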
++
++
++
++/*
++ * Marshalling functions to get loff_t file sizes into and out of
++ * object headers.
++ */
++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
++{
++ oh->file_size_low = (fsize & 0xFFFFFFFF);
++ oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
++}
++
++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
++{
++ loff_t retval;
++
++ if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
++ retval = (((loff_t) oh->file_size_high) << 32) |
++ (((loff_t) oh->file_size_low) & 0xFFFFFFFF);
++ else
++ retval = (loff_t) oh->file_size_low;
++
++ return retval;
++}
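++
++/*
++ * Illustrative sketch (a hypothetical self-check, assuming a 64-bit
++ * loff_t): a size above 4 GB must round-trip intact through the split
++ * low/high header fields. Not compiled.
++ */
++#if 0
++static int example_size_roundtrip(struct yaffs_obj_hdr *oh)
++{
++	loff_t size = ((loff_t) 5) << 30;	/* 5 GiB */
++
++	yaffs_oh_size_load(oh, size);
++	return yaffs_oh_to_size(oh) == size;
++}
++#endif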
++
++
++void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
++{
++ int i;
++ struct yaffs_block_info *bi;
++ int s;
++
++	for (i = 0; i < 10; i++)
++ bs[i] = 0;
++
++	for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ bi = yaffs_get_block_info(dev, i);
++ s = bi->block_state;
++		if (s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN)
++ bs[0]++;
++ else
++ bs[s]++;
++ }
++}
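++
++/*
++ * Illustrative usage sketch (a hypothetical debug helper, not part of
++ * the yaffs API): bs[0] counts blocks in an illegal state; the other
++ * slots follow enum yaffs_block_state. Not compiled.
++ */
++#if 0
++static int example_count_dirty_blocks(struct yaffs_dev *dev)
++{
++	int bs[10];
++
++	yaffs_count_blocks_by_state(dev, bs);
++	return bs[YAFFS_BLOCK_STATE_DIRTY];
++}
++#endif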
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_guts.h linux-3.15-rc5/fs/yaffs2/yaffs_guts.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_guts.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_guts.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,1007 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GUTS_H__
++#define __YAFFS_GUTS_H__
++
++#include "yportenv.h"
++
++#define YAFFS_OK 1
++#define YAFFS_FAIL 0
++
++/* Give us a Y=0x59,
++ * Give us an A=0x41,
++ * Give us an FF=0xff
++ * Give us an S=0x53
++ * And what have we got...
++ */
++#define YAFFS_MAGIC 0x5941ff53
++
++/*
++ * Tnodes form a tree with the tnodes in "levels"
++ * Levels greater than 0 hold 8 slots which point to other tnodes.
++ * Those at level 0 hold 16 slots which point to chunks in NAND.
++ *
++ * A maximum level of 8 thus supports files of size up to:
++ *
++ *	2^(3*MAX_LEVEL+4) chunks
++ *
++ * Thus a max level of 8 supports files with up to 2^28 chunks, which gives
++ * a maximum file size of around 512 Gbytes with 2k chunks.
++ */
++#define YAFFS_NTNODES_LEVEL0 16
++#define YAFFS_TNODES_LEVEL0_BITS 4
++#define YAFFS_TNODES_LEVEL0_MASK 0xf
++
++#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
++#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
++#define YAFFS_TNODES_INTERNAL_MASK 0x7
++#define YAFFS_TNODES_MAX_LEVEL 8
++#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \
++ YAFFS_TNODES_INTERNAL_BITS * \
++ YAFFS_TNODES_MAX_LEVEL)
++#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1)
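++
++/*
++ * Worked check of the limits above (a hypothetical compile-time sketch,
++ * not part of the original header): YAFFS_TNODES_MAX_BITS = 4 + 3*8 = 28,
++ * so chunk ids span 2^28 values, and 2 KiB data chunks then give about
++ * 2^39 bytes (512 Gbytes) per file. Not compiled.
++ */
++#if 0
++typedef char yaffs_max_bits_check[(YAFFS_TNODES_MAX_BITS == 28) ? 1 : -1];
++#endif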
++
++#define YAFFS_MAX_FILE_SIZE_32 0x7fffffff
++
++/* Constants for YAFFS1 mode */
++#define YAFFS_BYTES_PER_SPARE 16
++#define YAFFS_BYTES_PER_CHUNK 512
++#define YAFFS_CHUNK_SIZE_SHIFT 9
++#define YAFFS_CHUNKS_PER_BLOCK 32
++#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
++
++#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
++#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
++
++
++
++#define YAFFS_ALLOCATION_NOBJECTS 100
++#define YAFFS_ALLOCATION_NTNODES 100
++#define YAFFS_ALLOCATION_NLINKS 100
++
++#define YAFFS_NOBJECT_BUCKETS 256
++
++#define YAFFS_OBJECT_SPACE 0x40000
++#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
++
++/* Binary data version stamps */
++#define YAFFS_SUMMARY_VERSION 1
++#define YAFFS_CHECKPOINT_VERSION 7
++
++#ifdef CONFIG_YAFFS_UNICODE
++#define YAFFS_MAX_NAME_LENGTH 127
++#define YAFFS_MAX_ALIAS_LENGTH 79
++#else
++#define YAFFS_MAX_NAME_LENGTH 255
++#define YAFFS_MAX_ALIAS_LENGTH 159
++#endif
++
++#define YAFFS_SHORT_NAME_LENGTH 15
++
++/* Some special object ids for pseudo objects */
++#define YAFFS_OBJECTID_ROOT 1
++#define YAFFS_OBJECTID_LOSTNFOUND 2
++#define YAFFS_OBJECTID_UNLINKED 3
++#define YAFFS_OBJECTID_DELETED 4
++
++/* Fake object Id for summary data */
++#define YAFFS_OBJECTID_SUMMARY 0x10
++
++/* Pseudo object ids for checkpointing */
++#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
++#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
++
++#define YAFFS_MAX_SHORT_OP_CACHES 20
++
++#define YAFFS_N_TEMP_BUFFERS 6
++
++/* We limit the number of attempts at successfully saving a chunk of data.
++ * Small-page devices have 32 pages per block; large-page devices have 64.
++ * Default to something in the order of 5 to 10 blocks worth of chunks.
++ */
++#define YAFFS_WR_ATTEMPTS (5*64)
++
++/* Sequence numbers are used in YAFFS2 to determine block allocation order.
++ * The range is limited slightly to help distinguish bad numbers from good.
++ * This also allows us to perhaps in the future use special numbers for
++ * special purposes.
++ * 0xEFFFFF00 allows the allocation of 8 blocks/second (~1 Mbyte/s) for 15 years,
++ * and is a larger number than the lifetime of a 2GB device.
++ */
++#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
++#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
++
++/* Special sequence number for bad block that failed to be marked bad */
++#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
++
++/* ChunkCache is used for short read/write operations.*/
++struct yaffs_cache {
++ struct yaffs_obj *object;
++ int chunk_id;
++ int last_use;
++ int dirty;
++ int n_bytes; /* Only valid if the cache is dirty */
++ int locked; /* Can't push out or flush while locked. */
++ u8 *data;
++};
++
++/* yaffs1 tags structures in RAM
++ * NB This uses bitfields. Bitfields must not straddle a u32 boundary,
++ * otherwise the structure size will get blown out.
++ */
++
++struct yaffs_tags {
++ u32 chunk_id:20;
++ u32 serial_number:2;
++ u32 n_bytes_lsb:10;
++ u32 obj_id:18;
++ u32 ecc:12;
++ u32 n_bytes_msb:2;
++};
++
++union yaffs_tags_union {
++ struct yaffs_tags as_tags;
++ u8 as_bytes[8];
++};
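++
++/*
++ * Illustrative sketch (a hypothetical size check, not part of the
++ * original header): the two 32-bit groups of bitfields above
++ * (20+2+10 and 18+12+2 bits) pack the yaffs1 tags into exactly the
++ * 8 bytes of as_bytes. Not compiled.
++ */
++#if 0
++typedef char yaffs_tags_size_check[
++	(sizeof(union yaffs_tags_union) == 8) ? 1 : -1];
++#endif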
++
++
++/* Stuff used for extended tags in YAFFS2 */
++
++enum yaffs_ecc_result {
++ YAFFS_ECC_RESULT_UNKNOWN,
++ YAFFS_ECC_RESULT_NO_ERROR,
++ YAFFS_ECC_RESULT_FIXED,
++ YAFFS_ECC_RESULT_UNFIXED
++};
++
++enum yaffs_obj_type {
++ YAFFS_OBJECT_TYPE_UNKNOWN,
++ YAFFS_OBJECT_TYPE_FILE,
++ YAFFS_OBJECT_TYPE_SYMLINK,
++ YAFFS_OBJECT_TYPE_DIRECTORY,
++ YAFFS_OBJECT_TYPE_HARDLINK,
++ YAFFS_OBJECT_TYPE_SPECIAL
++};
++
++#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
++
++struct yaffs_ext_tags {
++ unsigned chunk_used; /* Status of the chunk: used or unused */
++ unsigned obj_id; /* If 0 this is not used */
++ unsigned chunk_id; /* If 0 this is a header, else a data chunk */
++ unsigned n_bytes; /* Only valid for data chunks */
++
++ /* The following stuff only has meaning when we read */
++ enum yaffs_ecc_result ecc_result;
++ unsigned block_bad;
++
++ /* YAFFS 1 stuff */
++ unsigned is_deleted; /* The chunk is marked deleted */
++ unsigned serial_number; /* Yaffs1 2-bit serial number */
++
++ /* YAFFS2 stuff */
++ unsigned seq_number; /* The sequence number of this block */
++
++ /* Extra info if this is an object header (YAFFS2 only) */
++
++ unsigned extra_available; /* Extra info available if not zero */
++ unsigned extra_parent_id; /* The parent object */
++ unsigned extra_is_shrink; /* Is it a shrink header? */
++ unsigned extra_shadows; /* Does this shadow another object? */
++
++ enum yaffs_obj_type extra_obj_type; /* What object type? */
++
++ loff_t extra_file_size; /* Length if it is a file */
++ unsigned extra_equiv_id; /* Equivalent object for a hard link */
++};
++
++/* Spare structure for YAFFS1 */
++struct yaffs_spare {
++ u8 tb0;
++ u8 tb1;
++ u8 tb2;
++ u8 tb3;
++ u8 page_status; /* set to 0 to delete the chunk */
++ u8 block_status;
++ u8 tb4;
++ u8 tb5;
++ u8 ecc1[3];
++ u8 tb6;
++ u8 tb7;
++ u8 ecc2[3];
++};
++
++/* Special structure for passing through to mtd */
++struct yaffs_nand_spare {
++ struct yaffs_spare spare;
++ int eccres1;
++ int eccres2;
++};
++
++/* Block data in RAM */
++
++enum yaffs_block_state {
++ YAFFS_BLOCK_STATE_UNKNOWN = 0,
++
++ YAFFS_BLOCK_STATE_SCANNING,
++ /* Being scanned */
++
++ YAFFS_BLOCK_STATE_NEEDS_SCAN,
++ /* The block might have something on it (ie it is allocating or full,
++ * perhaps empty) but it needs to be scanned to determine its true
++ * state.
++ * This state is only valid during scanning.
++ * NB We tolerate empty because the pre-scanner might be incapable of
++ * deciding
++ * However, if this state is returned on a YAFFS2 device,
++ * then we expect a sequence number
++ */
++
++ YAFFS_BLOCK_STATE_EMPTY,
++ /* This block is empty */
++
++ YAFFS_BLOCK_STATE_ALLOCATING,
++ /* This block is partially allocated.
++ * At least one page holds valid data.
++ * This is the one currently being used for page
++ * allocation. Should never be more than one of these.
++ * If a block is only partially allocated at mount it is treated as
++ * full.
++ */
++
++ YAFFS_BLOCK_STATE_FULL,
++ /* All the pages in this block have been allocated.
++ * If a block was only partially allocated when mounted we treat
++ * it as fully allocated.
++ */
++
++ YAFFS_BLOCK_STATE_DIRTY,
++ /* The block was full and now all chunks have been deleted.
++ * Erase me, reuse me.
++ */
++
++ YAFFS_BLOCK_STATE_CHECKPOINT,
++ /* This block is assigned to holding checkpoint data. */
++
++ YAFFS_BLOCK_STATE_COLLECTING,
++ /* This block is being garbage collected */
++
++ YAFFS_BLOCK_STATE_DEAD
++ /* This block has failed and is not in use */
++};
++
++#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
++
++struct yaffs_block_info {
++
++ s32 soft_del_pages:10; /* number of soft deleted pages */
++ s32 pages_in_use:10; /* number of pages in use */
++ u32 block_state:4; /* One of the above block states. */
++ /* NB use unsigned because enum is sometimes
++ * an int */
++ u32 needs_retiring:1; /* Data has failed on this block, */
++ /*need to get valid data off and retire*/
++ u32 skip_erased_check:1;/* Skip the erased check on this block */
++ u32 gc_prioritise:1; /* An ECC check or blank check has failed.
++ Block should be prioritised for GC */
++ u32 chunk_error_strikes:3; /* How many times we've had ecc etc
++ failures on this block and tried to reuse it */
++ u32 has_summary:1; /* The block has a summary */
++
++ u32 has_shrink_hdr:1; /* This block has at least one shrink header */
++ u32 seq_number; /* block sequence number for yaffs2 */
++
++};
++
++/* -------------------------- Object structure -------------------------------*/
++/* This is the object structure as stored on NAND */
++
++struct yaffs_obj_hdr {
++ enum yaffs_obj_type type;
++
++ /* Apply to everything */
++ int parent_obj_id;
++ u16 sum_no_longer_used; /* checksum of name. No longer used */
++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ /* The following apply to all object types except for hard links */
++ u32 yst_mode; /* protection */
++
++ u32 yst_uid;
++ u32 yst_gid;
++ u32 yst_atime;
++ u32 yst_mtime;
++ u32 yst_ctime;
++
++ /* File size applies to files only */
++ u32 file_size_low;
++
++ /* Equivalent object id applies to hard links only. */
++ int equiv_id;
++
++ /* Alias is for symlinks only. */
++ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
++
++ u32 yst_rdev; /* stuff for block and char devices (major/min) */
++
++ u32 win_ctime[2];
++ u32 win_atime[2];
++ u32 win_mtime[2];
++
++ u32 inband_shadowed_obj_id;
++ u32 inband_is_shrink;
++
++ u32 file_size_high;
++ u32 reserved[1];
++ int shadows_obj; /* This object header shadows the
++ specified object if > 0 */
++
++	/* is_shrink applies to object headers written when we make a hole. */
++ u32 is_shrink;
++
++};
++
++/*--------------------------- Tnode -------------------------- */
++
++struct yaffs_tnode {
++ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
++};
++
++/*------------------------ Object -----------------------------*/
++/* An object can be one of:
++ * - a directory (no data, has child links)
++ * - a regular file (data.... not prunes :->).
++ * - a symlink [symbolic link] (the alias).
++ * - a hard link
++ */
++
++struct yaffs_file_var {
++ loff_t file_size;
++ loff_t scanned_size;
++ loff_t shrink_size;
++ int top_level;
++ struct yaffs_tnode *top;
++};
++
++struct yaffs_dir_var {
++ struct list_head children; /* list of child links */
++ struct list_head dirty; /* Entry for list of dirty directories */
++};
++
++struct yaffs_symlink_var {
++ YCHAR *alias;
++};
++
++struct yaffs_hardlink_var {
++ struct yaffs_obj *equiv_obj;
++ u32 equiv_id;
++};
++
++union yaffs_obj_var {
++ struct yaffs_file_var file_variant;
++ struct yaffs_dir_var dir_variant;
++ struct yaffs_symlink_var symlink_variant;
++ struct yaffs_hardlink_var hardlink_variant;
++};
++
++struct yaffs_obj {
++ u8 deleted:1; /* This should only apply to unlinked files. */
++ u8 soft_del:1; /* it has also been soft deleted */
++ u8 unlinked:1; /* An unlinked file.*/
++ u8 fake:1; /* A fake object has no presence on NAND. */
++ u8 rename_allowed:1; /* Some objects cannot be renamed. */
++ u8 unlink_allowed:1;
++ u8 dirty:1; /* the object needs to be written to flash */
++ u8 valid:1; /* When the file system is being loaded up, this
++ * object might be created before the data
++ * is available
++ * ie. file data chunks encountered before
++ * the header.
++ */
++ u8 lazy_loaded:1; /* This object has been lazy loaded and
++ * is missing some detail */
++
++ u8 defered_free:1; /* Object is removed from NAND, but is
++ * still in the inode cache.
++				 * Freeing of the object is deferred
++				 * until the inode is released.
++ */
++ u8 being_created:1; /* This object is still being created
++ * so skip some verification checks. */
++ u8 is_shadowed:1; /* This object is shadowed on the way
++ * to being renamed. */
++
++	u8 xattr_known:1;	/* We know whether this object has xattribs
++ * or not. */
++ u8 has_xattr:1; /* This object has xattribs.
++ * Only valid if xattr_known. */
++
++ u8 serial; /* serial number of chunk in NAND.*/
++ u16 sum; /* sum of the name to speed searching */
++
++ struct yaffs_dev *my_dev; /* The device I'm on */
++
++ struct list_head hash_link; /* list of objects in hash bucket */
++
++ struct list_head hard_links; /* hard linked object chain*/
++
++ /* directory structure stuff */
++ /* also used for linking up the free list */
++ struct yaffs_obj *parent;
++ struct list_head siblings;
++
++ /* Where's my object header in NAND? */
++ int hdr_chunk;
++
++ int n_data_chunks; /* Number of data chunks for this file. */
++
++ u32 obj_id; /* the object id value */
++
++ u32 yst_mode;
++
++ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
++
++#ifdef CONFIG_YAFFS_WINCE
++ u32 win_ctime[2];
++ u32 win_mtime[2];
++ u32 win_atime[2];
++#else
++ u32 yst_uid;
++ u32 yst_gid;
++ u32 yst_atime;
++ u32 yst_mtime;
++ u32 yst_ctime;
++#endif
++
++ u32 yst_rdev;
++
++ void *my_inode;
++
++ enum yaffs_obj_type variant_type;
++
++ union yaffs_obj_var variant;
++
++};
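
The variant member is a tagged union keyed by variant_type, so code
must check the type before touching a variant. A minimal accessor
sketch (hypothetical helper; YAFFS_OBJECT_TYPE_FILE comes from the
yaffs_obj_type enum defined elsewhere in these headers):

	/* Only file objects carry a meaningful file_variant. */
	static loff_t obj_logical_size(const struct yaffs_obj *obj)
	{
		if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
			return 0;
		return obj->variant.file_variant.file_size;
	}
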
++
++struct yaffs_obj_bucket {
++ struct list_head list;
++ int count;
++};
++
++/* yaffs_checkpt_obj holds the definition of an object as dumped
++ * by checkpointing.
++ */
++
++struct yaffs_checkpt_obj {
++ int struct_type;
++ u32 obj_id;
++ u32 parent_id;
++ int hdr_chunk;
++ enum yaffs_obj_type variant_type:3;
++ u8 deleted:1;
++ u8 soft_del:1;
++ u8 unlinked:1;
++ u8 fake:1;
++ u8 rename_allowed:1;
++ u8 unlink_allowed:1;
++ u8 serial;
++ int n_data_chunks;
++ loff_t size_or_equiv_obj;
++};
++
++/*--------------------- Temporary buffers ----------------
++ *
++ * These are chunk-sized working buffers. Each device has a few.
++ */
++
++struct yaffs_buffer {
++ u8 *buffer;
++ int in_use;
++};
++
++/*----------------- Device ---------------------------------*/
++
++struct yaffs_param {
++ const YCHAR *name;
++
++ /*
++ * Entry parameters set up way early. Yaffs sets up the rest.
++ * The structure should be zeroed out before use so that unused
++ * and default values are zero.
++ */
++
++	int inband_tags;	/* Use inband tags */
++ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
++ be a power of 2 */
++ int chunks_per_block; /* does not need to be a power of 2 */
++ int spare_bytes_per_chunk; /* spare area size */
++ int start_block; /* Start block we're allowed to use */
++ int end_block; /* End block we're allowed to use */
++ int n_reserved_blocks; /* Tuneable so that we can reduce
++ * reserved blocks on NOR and RAM. */
++
++ int n_caches; /* If <= 0, then short op caching is disabled,
++ * else the number of short op caches.
++ */
++ int cache_bypass_aligned; /* If non-zero then bypass the cache for
++ * aligned writes.
++ */
++
++ int use_nand_ecc; /* Flag to decide whether or not to use
++ * NAND driver ECC on data (yaffs1) */
++ int tags_9bytes; /* Use 9 byte tags */
++ int no_tags_ecc; /* Flag to decide whether or not to do ECC
++ * on packed tags (yaffs2) */
++
++ int is_yaffs2; /* Use yaffs2 mode on this device */
++
++ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
++
++ int refresh_period; /* How often to check for a block refresh */
++
++ /* Checkpoint control. Can be set before or after initialisation */
++ u8 skip_checkpt_rd;
++ u8 skip_checkpt_wr;
++
++ int enable_xattr; /* Enable xattribs */
++
++ int max_objects; /*
++ * Set to limit the number of objects created.
++ * 0 = no limit.
++ */
++
++ /* The remove_obj_fn function must be supplied by OS flavours that
++ * need it.
++ * yaffs direct uses it to implement the faster readdir.
++	 * Linux uses it to protect the directory during unlinking.
++ */
++ void (*remove_obj_fn) (struct yaffs_obj *obj);
++
++ /* Callback to mark the superblock dirty */
++ void (*sb_dirty_fn) (struct yaffs_dev *dev);
++
++ /* Callback to control garbage collection. */
++ unsigned (*gc_control_fn) (struct yaffs_dev *dev);
++
++ /* Debug control flags. Don't use unless you know what you're doing */
++ int use_header_file_size; /* Flag to determine if we should use
++ * file sizes from the header */
++ int disable_lazy_load; /* Disable lazy loading on this device */
++ int wide_tnodes_disabled; /* Set to disable wide tnodes */
++ int disable_soft_del; /* yaffs 1 only: Set to disable the use of
++ * softdeletion. */
++
++ int defered_dir_update; /* Set to defer directory updates */
++
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ int auto_unicode;
++#endif
++ int always_check_erased; /* Force chunk erased check always on */
++
++ int disable_summary;
++ int disable_bad_block_marking;
++
++};
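
As the comment at the top of the structure says, OS glue zeroes the
whole device and then fills in only the geometry it knows. A
configuration sketch with illustrative values (2 KiB chunks, 64 chunks
per block; none of these numbers come from this patch):

	static void example_setup(struct yaffs_dev *dev)
	{
		memset(dev, 0, sizeof(*dev));	/* unset fields become defaults */
		dev->param.total_bytes_per_chunk = 2048;
		dev->param.chunks_per_block = 64;
		dev->param.spare_bytes_per_chunk = 64;
		dev->param.start_block = 1;
		dev->param.end_block = 1023;
		dev->param.n_reserved_blocks = 5;
		dev->param.n_caches = 10;
		dev->param.is_yaffs2 = 1;
	}
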
++
++struct yaffs_driver {
++ int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
++ const u8 *data, int data_len,
++ const u8 *oob, int oob_len);
++ int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
++ u8 *data, int data_len,
++ u8 *oob, int oob_len,
++ enum yaffs_ecc_result *ecc_result);
++ int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
++ int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
++ int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
++ int (*drv_initialise_fn) (struct yaffs_dev *dev);
++ int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
++};
++
++struct yaffs_tags_handler {
++ int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
++ int nand_chunk, const u8 *data,
++ const struct yaffs_ext_tags *tags);
++ int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
++ int nand_chunk, u8 *data,
++ struct yaffs_ext_tags *tags);
++
++ int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number);
++ int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
++};
++
++struct yaffs_dev {
++ struct yaffs_param param;
++ struct yaffs_driver drv;
++ struct yaffs_tags_handler tagger;
++
++ /* Context storage. Holds extra OS specific data for this device */
++
++ void *os_context;
++ void *driver_context;
++
++ struct list_head dev_list;
++
++ int ll_init;
++ /* Runtime parameters. Set up by YAFFS. */
++ int data_bytes_per_chunk;
++
++ /* Non-wide tnode stuff */
++ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
++ * the tnodes are not wide enough.
++ */
++	u16 chunk_grp_size;	/* == 2^chunk_grp_bits */
++
++ /* Stuff to support wide tnodes */
++ u32 tnode_width;
++ u32 tnode_mask;
++ u32 tnode_size;
++
++ /* Stuff for figuring out file offset to chunk conversions */
++ u32 chunk_shift; /* Shift value */
++ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
++ u32 chunk_mask; /* Mask to use for power-of-2 case */
++
++ int is_mounted;
++ int read_only;
++ int is_checkpointed;
++
++ /* Stuff to support block offsetting to support start block zero */
++ int internal_start_block;
++ int internal_end_block;
++ int block_offset;
++ int chunk_offset;
++
++ /* Runtime checkpointing stuff */
++ int checkpt_page_seq; /* running sequence number of checkpt pages */
++ int checkpt_byte_count;
++ int checkpt_byte_offs;
++ u8 *checkpt_buffer;
++ int checkpt_open_write;
++ int blocks_in_checkpt;
++ int checkpt_cur_chunk;
++ int checkpt_cur_block;
++ int checkpt_next_block;
++ int *checkpt_block_list;
++ int checkpt_max_blocks;
++ u32 checkpt_sum;
++ u32 checkpt_xor;
++
++ int checkpoint_blocks_required; /* Number of blocks needed to store
++ * current checkpoint set */
++
++ /* Block Info */
++ struct yaffs_block_info *block_info;
++ u8 *chunk_bits; /* bitmap of chunks in use */
++ u8 block_info_alt:1; /* allocated using alternative alloc */
++ u8 chunk_bits_alt:1; /* allocated using alternative alloc */
++ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
++ * Must be consistent with chunks_per_block.
++ */
++
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ u32 alloc_page;
++ int alloc_block_finder; /* Used to search for next allocation block */
++
++ /* Object and Tnode memory management */
++ void *allocator;
++ int n_obj;
++ int n_tnodes;
++
++ int n_hardlinks;
++
++ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
++ u32 bucket_finder;
++
++ int n_free_chunks;
++
++ /* Garbage collection control */
++ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
++ u32 n_clean_ups;
++
++ unsigned has_pending_prioritised_gc; /* We think this device might
++ have pending prioritised gcs */
++ unsigned gc_disable;
++ unsigned gc_block_finder;
++ unsigned gc_dirtiest;
++ unsigned gc_pages_in_use;
++ unsigned gc_not_done;
++ unsigned gc_block;
++ unsigned gc_chunk;
++ unsigned gc_skip;
++ struct yaffs_summary_tags *gc_sum_tags;
++
++ /* Special directories */
++ struct yaffs_obj *root_dir;
++ struct yaffs_obj *lost_n_found;
++
++ int buffered_block; /* Which block is buffered here? */
++ int doing_buffered_block_rewrite;
++
++ struct yaffs_cache *cache;
++ int cache_last_use;
++
++ /* Stuff for background deletion and unlinked files. */
++ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
++ files live. */
++ struct yaffs_obj *del_dir; /* Directory where deleted objects are
++ sent to disappear. */
++ struct yaffs_obj *unlinked_deletion; /* Current file being
++ background deleted. */
++	int n_deleted_files;	/* Count of files awaiting deletion. */
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
++
++ /* Temporary buffer management */
++ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
++ int max_temp;
++ int temp_in_use;
++ int unmanaged_buffer_allocs;
++ int unmanaged_buffer_deallocs;
++
++ /* yaffs2 runtime stuff */
++ unsigned seq_number; /* Sequence number of currently
++ allocating block */
++ unsigned oldest_dirty_seq;
++ unsigned oldest_dirty_block;
++
++ /* Block refreshing */
++ int refresh_skip; /* A skip down counter.
++ * Refresh happens when this gets to zero. */
++
++ /* Dirty directory handling */
++ struct list_head dirty_dirs; /* List of dirty directories */
++
++ /* Summary */
++ int chunks_per_summary;
++ struct yaffs_summary_tags *sum_tags;
++
++ /* Statistics */
++ u32 n_page_writes;
++ u32 n_page_reads;
++ u32 n_erasures;
++ u32 n_bad_queries;
++ u32 n_bad_markings;
++ u32 n_erase_failures;
++ u32 n_gc_copies;
++ u32 all_gcs;
++ u32 passive_gc_count;
++ u32 oldest_dirty_gc_count;
++ u32 n_gc_blocks;
++ u32 bg_gcs;
++ u32 n_retried_writes;
++ u32 n_retired_blocks;
++ u32 n_ecc_fixed;
++ u32 n_ecc_unfixed;
++ u32 n_tags_ecc_fixed;
++ u32 n_tags_ecc_unfixed;
++ u32 n_deletions;
++ u32 n_unmarked_deletions;
++ u32 refresh_count;
++ u32 cache_hits;
++ u32 tags_used;
++ u32 summary_used;
++
++};
++
++/* The CheckpointDevice structure holds the device information that changes
++ * at runtime and must be preserved over unmount/mount cycles.
++ */
++struct yaffs_checkpt_dev {
++ int struct_type;
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ u32 alloc_page;
++ int n_free_chunks;
++
++	int n_deleted_files;	/* Count of files awaiting deletion. */
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
++
++ /* yaffs2 runtime stuff */
++ unsigned seq_number; /* Sequence number of currently
++ * allocating block */
++
++};
++
++struct yaffs_checkpt_validity {
++ int struct_type;
++ u32 magic;
++ u32 version;
++ u32 head;
++};
++
++struct yaffs_shadow_fixer {
++ int obj_id;
++ int shadowed_id;
++ struct yaffs_shadow_fixer *next;
++};
++
++/* Structure for doing xattr modifications */
++struct yaffs_xattr_mod {
++ int set; /* If 0 then this is a deletion */
++ const YCHAR *name;
++ const void *data;
++ int size;
++ int flags;
++ int result;
++};
++
++/*----------------------- YAFFS Functions -----------------------*/
++
++int yaffs_guts_initialise(struct yaffs_dev *dev);
++void yaffs_deinitialise(struct yaffs_dev *dev);
++
++int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
++
++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
++ struct yaffs_obj *new_dir, const YCHAR * new_name);
++
++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
++int yaffs_del_obj(struct yaffs_obj *obj);
++struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
++ enum yaffs_obj_type type);
++
++
++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
++loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
++int yaffs_get_obj_inode(struct yaffs_obj *obj);
++unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
++int yaffs_get_obj_link_count(struct yaffs_obj *obj);
++
++/* File operations */
++int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
++ int n_bytes);
++int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
++		 int n_bytes, int write_through);
++int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
++
++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid);
++
++int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
++
++/* Flushing and checkpointing */
++void yaffs_flush_whole_cache(struct yaffs_dev *dev);
++
++int yaffs_checkpoint_save(struct yaffs_dev *dev);
++int yaffs_checkpoint_restore(struct yaffs_dev *dev);
++
++/* Directory operations */
++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
++ u32 mode, u32 uid, u32 gid);
++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
++ const YCHAR *name);
++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
++
++/* Link operations */
++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
++ struct yaffs_obj *equiv_obj);
++
++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
++
++/* Symlink operations */
++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, const YCHAR *alias);
++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
++
++/* Special inodes (fifos, sockets and devices) */
++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, u32 rdev);
++
++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
++ const void *value, int size, int flags);
++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
++ int size);
++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
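
A usage sketch for the xattr entry points above (hypothetical obj
pointer; the size-query convention of passing a NULL buffer with size
zero is an assumption based on nval_get() later in this patch):

	char val[16];

	yaffs_set_xattrib(obj, "user.note", "hi", 2, 0);
	yaffs_get_xattrib(obj, "user.note", val, sizeof(val));
	/* assumed size query: NULL buffer, zero size */
	yaffs_get_xattrib(obj, "user.note", NULL, 0);
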
++
++/* Special directories */
++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
++
++void yaffs_handle_defered_free(struct yaffs_obj *obj);
++
++void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
++
++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
++
++/* Debug dump */
++int yaffs_dump_obj(struct yaffs_obj *obj);
++
++void yaffs_guts_test(struct yaffs_dev *dev);
++int yaffs_guts_ll_init(struct yaffs_dev *dev);
++
++
++/* A few useful functions to be used within the core files*/
++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
++ int lyn);
++int yaffs_check_ff(u8 *buffer, int n_bytes);
++void yaffs_handle_chunk_error(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi);
++
++u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
++
++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
++ int number,
++ enum yaffs_obj_type type);
++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ int nand_chunk, int in_scan);
++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
++ const struct yaffs_obj_hdr *oh);
++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
++YCHAR *yaffs_clone_str(const YCHAR *str);
++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
++ int force, int is_shrink, int shadows,
++ struct yaffs_xattr_mod *xop);
++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
++ int backward_scanning);
++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id,
++ struct yaffs_tnode *passed_tn);
++
++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++		  int n_bytes, int write_through);
++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
++void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
++
++int yaffs_count_free_chunks(struct yaffs_dev *dev);
++
++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id);
++
++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++ unsigned pos);
++
++int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
++
++int yaffs_guts_format_dev(struct yaffs_dev *dev);
++
++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
++ int *chunk_out, u32 *offset_out);
++/*
++ * Marshalling functions to get loff_t file sizes into and out of
++ * object headers.
++ */
++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
++loff_t yaffs_max_file_size(struct yaffs_dev *dev);
++
++/*
++ * Debug function to count the number of blocks in each state.
++ * NB Needs to be called with an array big enough to hold one count
++ * per state (YAFFS_NUMBER_OF_BLOCK_STATES entries).
++ */
++
++void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]);
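
Usage sketch: the caller supplies one counter per block state, and
with the enum above YAFFS_NUMBER_OF_BLOCK_STATES matches the bs[10]
prototype (dev is an initialised struct yaffs_dev pointer):

	int bs[YAFFS_NUMBER_OF_BLOCK_STATES];

	yaffs_count_blocks_by_state(dev, bs);
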
++
++int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ struct yaffs_ext_tags *tags);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_linux.h linux-3.15-rc5/fs/yaffs2/yaffs_linux.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_linux.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_linux.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,48 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_LINUX_H__
++#define __YAFFS_LINUX_H__
++
++#include "yportenv.h"
++
++struct yaffs_linux_context {
++ struct list_head context_list; /* List of these we have mounted */
++ struct yaffs_dev *dev;
++ struct super_block *super;
++ struct task_struct *bg_thread; /* Background thread for this device */
++ int bg_running;
++ struct mutex gross_lock; /* Gross locking mutex*/
++ u8 *spare_buffer; /* For mtdif2 use. Don't know the buffer size
++ * at compile time so we have to allocate it.
++ */
++ struct list_head search_contexts;
++ struct task_struct *readdir_process;
++ unsigned mount_id;
++ int dirty;
++};
++
++#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
++#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
++#endif
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_mtdif.c linux-3.15-rc5/fs/yaffs2/yaffs_mtdif.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_mtdif.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_mtdif.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,309 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yportenv.h"
++
++#include "yaffs_mtdif.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++#include "linux/major.h"
++#include "linux/mtd/nand.h"
++#include "linux/kernel.h"
++#include "linux/version.h"
++#include "linux/types.h"
++
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++#include "yaffs_linux.h"
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
++#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
++#define mtd_erase(m, ei) (m)->erase(m, ei)
++#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
++#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
++#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
++#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
++#endif
++
++
++
++int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++	loff_t addr =
++ ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
++ dev->param.chunks_per_block;
++ struct erase_info ei;
++ int retval = 0;
++
++ ei.mtd = mtd;
++ ei.addr = addr;
++ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
++ ei.time = 1000;
++ ei.retries = 2;
++ ei.callback = NULL;
++ ei.priv = (u_long) dev;
++
++ retval = mtd_erase(mtd, &ei);
++
++ if (retval == 0)
++ return YAFFS_OK;
++
++ return YAFFS_FAIL;
++}
++
++
++static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
++ const u8 *data, int data_len,
++ const u8 *oob, int oob_len)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ loff_t addr;
++ struct mtd_oob_ops ops;
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "yaffs_mtd_write(%p, %d, %p, %d, %p, %d)\n",
++ dev, nand_chunk, data, data_len, oob, oob_len);
++
++ if (!data || !data_len) {
++ data = NULL;
++ data_len = 0;
++ }
++
++ if (!oob || !oob_len) {
++ oob = NULL;
++ oob_len = 0;
++ }
++
++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
++ memset(&ops, 0, sizeof(ops));
++ ops.mode = MTD_OPS_AUTO_OOB;
++ ops.len = (data) ? data_len : 0;
++ ops.ooblen = oob_len;
++ ops.datbuf = (u8 *)data;
++ ops.oobbuf = (u8 *)oob;
++
++ retval = mtd_write_oob(mtd, addr, &ops);
++ if (retval) {
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "write_oob failed, chunk %d, mtd error %d",
++ nand_chunk, retval);
++ }
++ return retval ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
++ u8 *data, int data_len,
++ u8 *oob, int oob_len,
++ enum yaffs_ecc_result *ecc_result)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ loff_t addr;
++ struct mtd_oob_ops ops;
++ int retval;
++
++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
++ memset(&ops, 0, sizeof(ops));
++ ops.mode = MTD_OPS_AUTO_OOB;
++ ops.len = (data) ? data_len : 0;
++ ops.ooblen = oob_len;
++ ops.datbuf = data;
++ ops.oobbuf = oob;
++
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
++ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
++ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
++ */
++ ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
++#endif
++ /* Read page and oob using MTD.
++ * Check status and determine ECC result.
++ */
++ retval = mtd_read_oob(mtd, addr, &ops);
++ if (retval)
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "read_oob failed, chunk %d, mtd error %d",
++ nand_chunk, retval);
++
++ switch (retval) {
++ case 0:
++ /* no error */
++ if(ecc_result)
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ break;
++
++ case -EUCLEAN:
++ /* MTD's ECC fixed the data */
++ if(ecc_result)
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
++ dev->n_ecc_fixed++;
++ break;
++
++ case -EBADMSG:
++ default:
++ /* MTD's ECC could not fix the data */
++ dev->n_ecc_unfixed++;
++ if(ecc_result)
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ return YAFFS_FAIL;
++ }
++
++ return YAFFS_OK;
++}
++
++static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++
++ loff_t addr;
++ struct erase_info ei;
++ int retval = 0;
++ u32 block_size;
++
++ block_size = dev->param.total_bytes_per_chunk *
++ dev->param.chunks_per_block;
++ addr = ((loff_t) block_no) * block_size;
++
++ ei.mtd = mtd;
++ ei.addr = addr;
++ ei.len = block_size;
++ ei.time = 1000;
++ ei.retries = 2;
++ ei.callback = NULL;
++ ei.priv = (u_long) dev;
++
++ retval = mtd_erase(mtd, &ei);
++
++ if (retval == 0)
++ return YAFFS_OK;
++
++ return YAFFS_FAIL;
++}
++
++static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
++
++ retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
++ return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_MTD, "checking block %d bad", block_no);
++
++ retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
++ return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_initialise(struct yaffs_dev *dev)
++{
++ return YAFFS_OK;
++}
++
++static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
++{
++ return YAFFS_OK;
++}
++
++
++void yaffs_mtd_drv_install(struct yaffs_dev *dev)
++{
++ struct yaffs_driver *drv = &dev->drv;
++
++ drv->drv_write_chunk_fn = yaffs_mtd_write;
++ drv->drv_read_chunk_fn = yaffs_mtd_read;
++ drv->drv_erase_fn = yaffs_mtd_erase;
++ drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
++ drv->drv_check_bad_fn = yaffs_mtd_check_bad;
++ drv->drv_initialise_fn = yaffs_mtd_initialise;
++ drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
++}
++
++
++struct mtd_info * yaffs_get_mtd_device(dev_t sdev)
++{
++ struct mtd_info *mtd;
++
++	/* Check it's an mtd device..... */
++	if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
++		return NULL;	/* This isn't an mtd device */
++
++	/* Look the MTD device up by minor number */
++	mtd = get_mtd_device(NULL, MINOR(sdev));
++	if (IS_ERR(mtd))
++		return NULL;
++
++ /* Check it's NAND */
++ if (mtd->type != MTD_NANDFLASH) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: MTD device is not NAND it's type %d",
++ mtd->type);
++ return NULL;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
++ yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
++ yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++ yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
++#else
++ yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
++#endif
++
++ return mtd;
++}
++
++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
++{
++ if (yaffs_version == 2) {
++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++ !inband_tags) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "MTD device does not have the right page sizes"
++ );
++ return -1;
++ }
++ } else {
++ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"MTD device does not have the right page sizes"
++ );
++ return -1;
++ }
++ }
++
++ return 0;
++}
++
++
++void yaffs_put_mtd_device(struct mtd_info *mtd)
++{
++ if(mtd)
++ put_mtd_device(mtd);
++}
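
Putting this file together, a mount path would wire things up roughly
as below (hypothetical glue; dev and sdev are assumed locals and error
handling is abbreviated):

	struct mtd_info *mtd = yaffs_get_mtd_device(sdev);

	if (mtd && yaffs_verify_mtd(mtd, 2, 0) == 0) {
		dev->driver_context = mtd;	/* see yaffs_dev_to_mtd() */
		yaffs_mtd_drv_install(dev);
	} else {
		yaffs_put_mtd_device(mtd);	/* safe on NULL */
	}
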
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_mtdif.h linux-3.15-rc5/fs/yaffs2/yaffs_mtdif.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_mtdif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_mtdif.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,25 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF_H__
++#define __YAFFS_MTDIF_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_mtd_drv_install(struct yaffs_dev *dev);
++struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
++void yaffs_put_mtd_device(struct mtd_info *mtd);
++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_nameval.c linux-3.15-rc5/fs/yaffs2/yaffs_nameval.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_nameval.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_nameval.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,208 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This simple implementation of a name-value store assumes a small number of
++ * values and fits into a small finite buffer.
++ *
++ * Each attribute is stored as a record:
++ * sizeof(int) bytes record size.
++ * strnlen+1 bytes name null terminated.
++ * nbytes value.
++ * ----------
++ * total size stored in record size
++ *
++ * This code has not been tested with unicode yet.
++ */
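
Worked example of the record layout (illustrative): storing the name
"user.note" (9 characters) with a 5-byte value on a platform with a
4-byte int gives a record of 4 + 9 + 1 + 5 = 19 bytes, and 19 is the
value written into the leading size field.
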
++
++#include "yaffs_nameval.h"
++
++#include "yportenv.h"
++
++static int nval_find(const char *xb, int xb_size, const YCHAR *name,
++ int *exist_size)
++{
++ int pos = 0;
++ int size;
++
++ memcpy(&size, xb, sizeof(int));
++ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
++ if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
++ name, size)) {
++ if (exist_size)
++ *exist_size = size;
++ return pos;
++ }
++ pos += size;
++ if (pos < xb_size - sizeof(int))
++ memcpy(&size, xb + pos, sizeof(int));
++ else
++ size = 0;
++ }
++ if (exist_size)
++ *exist_size = 0;
++ return -ENODATA;
++}
++
++static int nval_used(const char *xb, int xb_size)
++{
++ int pos = 0;
++ int size;
++
++ memcpy(&size, xb + pos, sizeof(int));
++ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
++ pos += size;
++ if (pos < xb_size - sizeof(int))
++ memcpy(&size, xb + pos, sizeof(int));
++ else
++ size = 0;
++ }
++ return pos;
++}
++
++int nval_del(char *xb, int xb_size, const YCHAR *name)
++{
++ int pos = nval_find(xb, xb_size, name, NULL);
++ int size;
++
++ if (pos < 0 || pos >= xb_size)
++ return -ENODATA;
++
++ /* Find size, shift rest over this record,
++ * then zero out the rest of buffer */
++ memcpy(&size, xb + pos, sizeof(int));
++ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
++ memset(xb + (xb_size - size), 0, size);
++ return 0;
++}
++
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
++ int bsize, int flags)
++{
++ int pos;
++ int namelen = strnlen(name, xb_size);
++ int reclen;
++ int size_exist = 0;
++ int space;
++ int start;
++
++ pos = nval_find(xb, xb_size, name, &size_exist);
++
++ if (flags & XATTR_CREATE && pos >= 0)
++ return -EEXIST;
++ if (flags & XATTR_REPLACE && pos < 0)
++ return -ENODATA;
++
++ start = nval_used(xb, xb_size);
++ space = xb_size - start + size_exist;
++
++ reclen = (sizeof(int) + namelen + 1 + bsize);
++
++ if (reclen > space)
++ return -ENOSPC;
++
++ if (pos >= 0) {
++ nval_del(xb, xb_size, name);
++ start = nval_used(xb, xb_size);
++ }
++
++ pos = start;
++
++ memcpy(xb + pos, &reclen, sizeof(int));
++ pos += sizeof(int);
++	/* Copy the name and its terminating NUL only; don't pad beyond */
++	strncpy((YCHAR *) (xb + pos), name, namelen + 1);
++ pos += (namelen + 1);
++ memcpy(xb + pos, buf, bsize);
++ return 0;
++}
++
++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
++ int bsize)
++{
++ int pos = nval_find(xb, xb_size, name, NULL);
++ int size;
++
++ if (pos >= 0 && pos < xb_size) {
++
++ memcpy(&size, xb + pos, sizeof(int));
++ pos += sizeof(int); /* advance past record length */
++ size -= sizeof(int);
++
++ /* Advance over name string */
++ while (xb[pos] && size > 0 && pos < xb_size) {
++ pos++;
++ size--;
++ }
++	/* Advance over NUL */
++ pos++;
++ size--;
++
++ /* If bsize is zero then this is a size query.
++ * Return the size, but don't copy.
++ */
++ if (!bsize)
++ return size;
++
++ if (size <= bsize) {
++ memcpy(buf, xb + pos, size);
++ return size;
++ }
++ }
++ if (pos >= 0)
++ return -ERANGE;
++
++ return -ENODATA;
++}
++
++int nval_list(const char *xb, int xb_size, char *buf, int bsize)
++{
++ int pos = 0;
++ int size;
++ int name_len;
++ int ncopied = 0;
++ int filled = 0;
++
++ memcpy(&size, xb + pos, sizeof(int));
++ while (size > sizeof(int) &&
++ size <= xb_size &&
++ (pos + size) < xb_size &&
++ !filled) {
++ pos += sizeof(int);
++ size -= sizeof(int);
++ name_len = strnlen((YCHAR *) (xb + pos), size);
++ if (ncopied + name_len + 1 < bsize) {
++ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
++ buf += name_len;
++ *buf = '\0';
++ buf++;
++ if (sizeof(YCHAR) > 1) {
++ *buf = '\0';
++ buf++;
++ }
++ ncopied += (name_len + 1);
++ } else {
++ filled = 1;
++ }
++ pos += size;
++ if (pos < xb_size - sizeof(int))
++ memcpy(&size, xb + pos, sizeof(int));
++ else
++ size = 0;
++ }
++ return ncopied;
++}
++
++int nval_hasvalues(const char *xb, int xb_size)
++{
++ return nval_used(xb, xb_size) > 0;
++}
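
An end-to-end sketch of the store (illustrative; assumes a char-based
YCHAR build and a caller-supplied buffer):

	char xb[512] = { 0 };	/* the small finite buffer assumed above */
	char out[8];

	nval_set(xb, sizeof(xb), "user.a", "one", 4, 0);
	nval_get(xb, sizeof(xb), "user.a", out, sizeof(out));	/* returns 4 */
	nval_del(xb, sizeof(xb), "user.a");
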
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_nameval.h linux-3.15-rc5/fs/yaffs2/yaffs_nameval.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_nameval.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_nameval.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __NAMEVAL_H__
++#define __NAMEVAL_H__
++
++#include "yportenv.h"
++
++int nval_del(char *xb, int xb_size, const YCHAR * name);
++int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
++ int bsize, int flags);
++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
++ int bsize);
++int nval_list(const char *xb, int xb_size, char *buf, int bsize);
++int nval_hasvalues(const char *xb, int xb_size);
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_nand.c linux-3.15-rc5/fs/yaffs2/yaffs_nand.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_nand.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_nand.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,122 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_nand.h"
++#include "yaffs_tagscompat.h"
++
++#include "yaffs_getblockinfo.h"
++#include "yaffs_summary.h"
++
++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
++{
++ return chunk - dev->chunk_offset;
++}
++
++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
++ u8 *buffer, struct yaffs_ext_tags *tags)
++{
++ int result;
++ struct yaffs_ext_tags local_tags;
++ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
++
++ dev->n_page_reads++;
++
++ /* If there are no tags provided use local tags. */
++ if (!tags)
++ tags = &local_tags;
++
++ result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
++ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
++
++ struct yaffs_block_info *bi;
++ bi = yaffs_get_block_info(dev,
++ nand_chunk /
++ dev->param.chunks_per_block);
++ yaffs_handle_chunk_error(dev, bi);
++ }
++ return result;
++}
++
++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *buffer, struct yaffs_ext_tags *tags)
++{
++ int result;
++ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
++
++ dev->n_page_writes++;
++
++ if (!tags) {
++ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ tags->seq_number = dev->seq_number;
++ tags->chunk_used = 1;
++ yaffs_trace(YAFFS_TRACE_WRITE,
++ "Writing chunk %d tags %d %d",
++ nand_chunk, tags->obj_id, tags->chunk_id);
++
++ result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
++ buffer, tags);
++
++ yaffs_summary_add(dev, tags, nand_chunk);
++
++ return result;
++}
++
++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++ block_no -= dev->block_offset;
++ dev->n_bad_markings++;
++
++ if (dev->param.disable_bad_block_marking)
++ return YAFFS_OK;
++
++ return dev->tagger.mark_bad_fn(dev, block_no);
++}
++
++
++int yaffs_query_init_block_state(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number)
++{
++ block_no -= dev->block_offset;
++ return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
++}
++
++int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
++{
++ int result;
++
++ block_no -= dev->block_offset;
++ dev->n_erasures++;
++ result = dev->drv.drv_erase_fn(dev, block_no);
++ return result;
++}
++
++int yaffs_init_nand(struct yaffs_dev *dev)
++{
++ if (dev->drv.drv_initialise_fn)
++ return dev->drv.drv_initialise_fn(dev);
++ return YAFFS_OK;
++}
++
++int yaffs_deinit_nand(struct yaffs_dev *dev)
++{
++ if (dev->drv.drv_deinitialise_fn)
++ return dev->drv.drv_deinitialise_fn(dev);
++ return YAFFS_OK;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_nand.h linux-3.15-rc5/fs/yaffs2/yaffs_nand.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_nand.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_nand.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_NAND_H__
++#define __YAFFS_NAND_H__
++#include "yaffs_guts.h"
++
++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
++ u8 *buffer, struct yaffs_ext_tags *tags);
++
++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *buffer, struct yaffs_ext_tags *tags);
++
++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
++
++int yaffs_query_init_block_state(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ unsigned *seq_number);
++
++int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
++
++int yaffs_init_nand(struct yaffs_dev *dev);
++int yaffs_deinit_nand(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags1.c linux-3.15-rc5/fs/yaffs2/yaffs_packedtags1.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags1.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_packedtags1.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,56 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags1.h"
++#include "yportenv.h"
++
++static const u8 all_ff[20] = {
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff
++};
++
++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
++ const struct yaffs_ext_tags *t)
++{
++ pt->chunk_id = t->chunk_id;
++ pt->serial_number = t->serial_number;
++ pt->n_bytes = t->n_bytes;
++ pt->obj_id = t->obj_id;
++ pt->ecc = 0;
++ pt->deleted = (t->is_deleted) ? 0 : 1;
++ pt->unused_stuff = 0;
++ pt->should_be_ff = 0xffffffff;
++}
++
++void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
++ const struct yaffs_packed_tags1 *pt)
++{
++
++ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
++ t->block_bad = 0;
++ if (pt->should_be_ff != 0xffffffff)
++ t->block_bad = 1;
++ t->chunk_used = 1;
++ t->obj_id = pt->obj_id;
++ t->chunk_id = pt->chunk_id;
++ t->n_bytes = pt->n_bytes;
++ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ t->is_deleted = (pt->deleted) ? 0 : 1;
++ t->serial_number = pt->serial_number;
++ } else {
++ memset(t, 0, sizeof(struct yaffs_ext_tags));
++ }
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags1.h linux-3.15-rc5/fs/yaffs2/yaffs_packedtags1.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags1.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_packedtags1.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS1_H__
++#define __YAFFS_PACKEDTAGS1_H__
++
++#include "yaffs_guts.h"
++
++struct yaffs_packed_tags1 {
++ u32 chunk_id:20;
++ u32 serial_number:2;
++ u32 n_bytes:10;
++ u32 obj_id:18;
++ u32 ecc:12;
++ u32 deleted:1;
++ u32 unused_stuff:1;
++ unsigned should_be_ff;
++
++};
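
The bitfields split cleanly across two 32-bit words (20+2+10 and
18+12+1+1), so with should_be_ff the packed tags occupy 12 bytes; the
20-byte all_ff table in yaffs_packedtags1.c is deliberately larger
than the structure it is compared against. A compile-time check
sketched under the usual packing assumption:

	static_assert(sizeof(struct yaffs_packed_tags1) == 3 * sizeof(u32),
		      "packed tags1 expected to occupy three words");
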
++
++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
++ const struct yaffs_ext_tags *t);
++void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
++ const struct yaffs_packed_tags1 *pt);
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags2.c linux-3.15-rc5/fs/yaffs2/yaffs_packedtags2.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags2.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_packedtags2.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,197 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags2.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++
++/* This code packs a set of extended tags into a binary structure for
++ * NAND storage
++ */
++
++/* Some of the information is "extra" stuff which can be packed in to
++ * speed up scanning.
++ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
++ */
++
++/* Extra flags applied to chunk_id */
++
++#define EXTRA_HEADER_INFO_FLAG 0x80000000
++#define EXTRA_SHRINK_FLAG 0x40000000
++#define EXTRA_SHADOWS_FLAG 0x20000000
++#define EXTRA_SPARE_FLAGS 0x10000000
++
++#define ALL_EXTRA_FLAGS 0xf0000000
++
++/* Also, the top 4 bits of the object Id are set to the object type. */
++#define EXTRA_OBJECT_TYPE_SHIFT (28)
++#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
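
Worked example (illustrative): an object-header chunk for a file whose
parent directory has object id 0x123 is packed as chunk_id =
EXTRA_HEADER_INFO_FLAG | 0x123 = 0x80000123, with the object type in
the top four bits of obj_id; unpacking masks chunk_id with
~ALL_EXTRA_FLAGS to recover the parent id.
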
++
++static void yaffs_dump_packed_tags2_tags_only(
++ const struct yaffs_packed_tags2_tags_only *ptt)
++{
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "packed tags obj %d chunk %d byte %d seq %d",
++ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
++}
++
++static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
++{
++ yaffs_dump_packed_tags2_tags_only(&pt->t);
++}
++
++static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
++{
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
++ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
++ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
++ t->seq_number);
++
++}
++
++static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
++{
++ if (t->chunk_id != 0 || !t->extra_available)
++ return 0;
++
++ /* Check if the file size is too long to store */
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
++ (t->extra_file_size >> 31) != 0)
++ return 0;
++ return 1;
++}
++
++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
++ const struct yaffs_ext_tags *t)
++{
++ ptt->chunk_id = t->chunk_id;
++ ptt->seq_number = t->seq_number;
++ ptt->n_bytes = t->n_bytes;
++ ptt->obj_id = t->obj_id;
++
++ /* Only store extra tags for object headers.
++	 * If it is a file then only store it if the file size is short
++ * enough to fit.
++ */
++ if (yaffs_check_tags_extra_packable(t)) {
++ /* Store the extra header info instead */
++ /* We save the parent object in the chunk_id */
++ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
++ if (t->extra_is_shrink)
++ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
++ if (t->extra_shadows)
++ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
++
++ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
++
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ ptt->n_bytes = t->extra_equiv_id;
++ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++ ptt->n_bytes = (unsigned) t->extra_file_size;
++ else
++ ptt->n_bytes = 0;
++ }
++
++ yaffs_dump_packed_tags2_tags_only(ptt);
++ yaffs_dump_tags2(t);
++}
++
++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
++ const struct yaffs_ext_tags *t, int tags_ecc)
++{
++ yaffs_pack_tags2_tags_only(&pt->t, t);
++
++ if (tags_ecc)
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
++ sizeof(struct yaffs_packed_tags2_tags_only),
++ &pt->ecc);
++}
++
++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
++ struct yaffs_packed_tags2_tags_only *ptt)
++{
++ memset(t, 0, sizeof(struct yaffs_ext_tags));
++
++ if (ptt->seq_number == 0xffffffff)
++ return;
++
++ t->block_bad = 0;
++ t->chunk_used = 1;
++ t->obj_id = ptt->obj_id;
++ t->chunk_id = ptt->chunk_id;
++ t->n_bytes = ptt->n_bytes;
++ t->is_deleted = 0;
++ t->serial_number = 0;
++ t->seq_number = ptt->seq_number;
++
++ /* Do extra header info stuff */
++ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
++ t->chunk_id = 0;
++ t->n_bytes = 0;
++
++ t->extra_available = 1;
++ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
++ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
++ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
++ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
++ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ t->extra_equiv_id = ptt->n_bytes;
++ else
++ t->extra_file_size = ptt->n_bytes;
++ }
++ yaffs_dump_packed_tags2_tags_only(ptt);
++ yaffs_dump_tags2(t);
++}
++
++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
++ int tags_ecc)
++{
++ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++
++ if (pt->t.seq_number != 0xffffffff && tags_ecc) {
++ /* Chunk is in use and we need to do ECC */
++
++ struct yaffs_ecc_other ecc;
++ int result;
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
++ sizeof(struct yaffs_packed_tags2_tags_only),
++ &ecc);
++ result =
++ yaffs_ecc_correct_other((unsigned char *)&pt->t,
++ sizeof(struct yaffs_packed_tags2_tags_only),
++ &pt->ecc, &ecc);
++ switch (result) {
++ case 0:
++ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ break;
++ case 1:
++ ecc_result = YAFFS_ECC_RESULT_FIXED;
++ break;
++ case -1:
++ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ break;
++ default:
++ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
++ }
++ }
++ yaffs_unpack_tags2_tags_only(t, &pt->t);
++
++ t->ecc_result = ecc_result;
++
++ yaffs_dump_packed_tags2(pt);
++ yaffs_dump_tags2(t);
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags2.h linux-3.15-rc5/fs/yaffs2/yaffs_packedtags2.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_packedtags2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_packedtags2.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,47 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS2_H__
++#define __YAFFS_PACKEDTAGS2_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_ecc.h"
++
++struct yaffs_packed_tags2_tags_only {
++ unsigned seq_number;
++ unsigned obj_id;
++ unsigned chunk_id;
++ unsigned n_bytes;
++};
++
++struct yaffs_packed_tags2 {
++ struct yaffs_packed_tags2_tags_only t;
++ struct yaffs_ecc_other ecc;
++};
++
++/* Full packed tags with ECC, used for oob tags */
++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
++ const struct yaffs_ext_tags *t, int tags_ecc);
++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
++ int tags_ecc);
++
++/* Only the tags part (no ECC), for use with inband tags */
++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
++ const struct yaffs_ext_tags *t);
++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
++ struct yaffs_packed_tags2_tags_only *pt);
++#endif
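
A pack/unpack roundtrip sketch using the declarations above
(illustrative values; t_out should mirror t_in):

	struct yaffs_ext_tags t_in, t_out;
	struct yaffs_packed_tags2 pt;

	memset(&t_in, 0, sizeof(t_in));
	t_in.obj_id = 42;
	t_in.chunk_id = 7;
	t_in.n_bytes = 2048;
	t_in.seq_number = 4097;

	yaffs_pack_tags2(&pt, &t_in, 1);	/* pack with tags ECC */
	yaffs_unpack_tags2(&t_out, &pt, 1);	/* verify ECC and unpack */
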
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_summary.c linux-3.15-rc5/fs/yaffs2/yaffs_summary.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_summary.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_summary.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,312 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Summaries write the useful part of the tags for the chunks in a block
++ * into an array which is written to the last n chunks of the block.
++ * Reading the summaries gives all the tags for the block in one read. Much
++ * faster.
++ *
++ * Chunks holding summaries are marked with tags making it look like
++ * they are part of a fake file.
++ *
++ * The summary could also be used during gc.
++ *
++ */
++
++#include "yaffs_summary.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_bitmap.h"
++
++/*
++ * The summary is built up in an array of summary tags.
++ * This gets written to the last one or two (maybe more) chunks in a block.
++ * A summary header is written as the first part of each chunk of summary data.
++ * The summary header must match or the summary is rejected.
++ */
++
++/* Summary tags don't need the sequence number because that is redundant. */
++struct yaffs_summary_tags {
++ unsigned obj_id;
++ unsigned chunk_id;
++ unsigned n_bytes;
++};
++
++/* Summary header */
++struct yaffs_summary_header {
++ unsigned version; /* Must match current version */
++ unsigned block; /* Must be this block */
++ unsigned seq; /* Must be this sequence number */
++ unsigned sum; /* Just add up all the bytes in the tags */
++};
++
++
++static void yaffs_summary_clear(struct yaffs_dev *dev)
++{
++ if (!dev->sum_tags)
++ return;
++ memset(dev->sum_tags, 0, dev->chunks_per_summary *
++ sizeof(struct yaffs_summary_tags));
++}
++
++
++void yaffs_summary_deinit(struct yaffs_dev *dev)
++{
++ kfree(dev->sum_tags);
++ dev->sum_tags = NULL;
++ kfree(dev->gc_sum_tags);
++ dev->gc_sum_tags = NULL;
++ dev->chunks_per_summary = 0;
++}
++
++int yaffs_summary_init(struct yaffs_dev *dev)
++{
++ int sum_bytes;
++ int chunks_used; /* Number of chunks used by summary */
++ int sum_tags_bytes;
++
++ sum_bytes = dev->param.chunks_per_block *
++ sizeof(struct yaffs_summary_tags);
++
++ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
++ (dev->data_bytes_per_chunk -
++ sizeof(struct yaffs_summary_header));
++
++ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
++ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
++ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
++ if (!dev->sum_tags || !dev->gc_sum_tags) {
++ yaffs_summary_deinit(dev);
++ return YAFFS_FAIL;
++ }
++
++ yaffs_summary_clear(dev);
++
++ return YAFFS_OK;
++}
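
Worked example of the sizing above (illustrative geometry, 4-byte
unsigned): with 64 chunks of 2048 bytes per block, sum_bytes = 64 * 12
= 768 bytes of summary tags; each summary chunk carries 2048 - 16 =
2032 payload bytes after its header, so chunks_used = 1 and
chunks_per_summary = 63, i.e. the summary consumes the last chunk of
the block.
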
++
++static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
++{
++ u8 *sum_buffer = (u8 *)dev->sum_tags;
++ int i;
++ unsigned sum = 0;
++
++ i = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ while (i > 0) {
++ sum += *sum_buffer;
++ sum_buffer++;
++ i--;
++ }
++
++ return sum;
++}
++
++static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
++{
++ struct yaffs_ext_tags tags;
++ u8 *buffer;
++ u8 *sum_buffer = (u8 *)dev->sum_tags;
++ int n_bytes;
++ int chunk_in_nand;
++ int chunk_in_block;
++ int result;
++ int this_tx;
++ struct yaffs_summary_header hdr;
++ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++
++ buffer = yaffs_get_temp_buffer(dev);
++ n_bytes = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ memset(&tags, 0, sizeof(struct yaffs_ext_tags));
++ tags.obj_id = YAFFS_OBJECTID_SUMMARY;
++ tags.chunk_id = 1;
++ chunk_in_block = dev->chunks_per_summary;
++ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
++ dev->chunks_per_summary;
++ hdr.version = YAFFS_SUMMARY_VERSION;
++ hdr.block = blk;
++ hdr.seq = bi->seq_number;
++ hdr.sum = yaffs_summary_sum(dev);
++
++ do {
++ this_tx = n_bytes;
++ if (this_tx > sum_bytes_per_chunk)
++ this_tx = sum_bytes_per_chunk;
++ memcpy(buffer, &hdr, sizeof(hdr));
++ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
++ tags.n_bytes = this_tx + sizeof(hdr);
++ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
++ buffer, &tags);
++
++ if (result != YAFFS_OK)
++ break;
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++ dev->n_free_chunks--;
++
++ n_bytes -= this_tx;
++ sum_buffer += this_tx;
++ chunk_in_nand++;
++ chunk_in_block++;
++ tags.chunk_id++;
++ } while (result == YAFFS_OK && n_bytes > 0);
++ yaffs_release_temp_buffer(dev, buffer);
++
++
++ if (result == YAFFS_OK)
++ bi->has_summary = 1;
++
++
++ return result;
++}
++
++int yaffs_summary_read(struct yaffs_dev *dev,
++ struct yaffs_summary_tags *st,
++ int blk)
++{
++ struct yaffs_ext_tags tags;
++ u8 *buffer;
++ u8 *sum_buffer = (u8 *)st;
++ int n_bytes;
++ int chunk_id;
++ int chunk_in_nand;
++ int chunk_in_block;
++ int result;
++ int this_tx;
++ struct yaffs_summary_header hdr;
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
++ int sum_tags_bytes;
++
++ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ buffer = yaffs_get_temp_buffer(dev);
++ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
++ chunk_in_block = dev->chunks_per_summary;
++ chunk_in_nand = blk * dev->param.chunks_per_block +
++ dev->chunks_per_summary;
++ chunk_id = 1;
++ do {
++ this_tx = n_bytes;
++ if (this_tx > sum_bytes_per_chunk)
++ this_tx = sum_bytes_per_chunk;
++ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
++ buffer, &tags);
++
++ if (tags.chunk_id != chunk_id ||
++ tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
++ tags.chunk_used == 0 ||
++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++ tags.n_bytes != (this_tx + sizeof(hdr)))
++ result = YAFFS_FAIL;
++ if (result != YAFFS_OK)
++ break;
++
++ if (st == dev->sum_tags) {
++ /* If we're scanning then update the block info */
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++ }
++ memcpy(&hdr, buffer, sizeof(hdr));
++ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
++ n_bytes -= this_tx;
++ sum_buffer += this_tx;
++ chunk_in_nand++;
++ chunk_in_block++;
++ chunk_id++;
++ } while (result == YAFFS_OK && n_bytes > 0);
++ yaffs_release_temp_buffer(dev, buffer);
++
++ if (result == YAFFS_OK) {
++ /* Verify header */
++ if (hdr.version != YAFFS_SUMMARY_VERSION ||
++ hdr.seq != bi->seq_number ||
++ hdr.sum != yaffs_summary_sum(dev))
++ result = YAFFS_FAIL;
++ }
++
++ if (st == dev->sum_tags && result == YAFFS_OK)
++ bi->has_summary = 1;
++
++ return result;
++}
++
++int yaffs_summary_add(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_nand)
++{
++ struct yaffs_packed_tags2_tags_only tags_only;
++ struct yaffs_summary_tags *sum_tags;
++ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
++ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
++
++ if (!dev->sum_tags)
++ return YAFFS_OK;
++
++ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
++ yaffs_pack_tags2_tags_only(&tags_only, tags);
++ sum_tags = &dev->sum_tags[chunk_in_block];
++ sum_tags->chunk_id = tags_only.chunk_id;
++ sum_tags->n_bytes = tags_only.n_bytes;
++ sum_tags->obj_id = tags_only.obj_id;
++
++ if (chunk_in_block == dev->chunks_per_summary - 1) {
++ /* Time to write out the summary */
++ yaffs_summary_write(dev, block_in_nand);
++ yaffs_summary_clear(dev);
++ yaffs_skip_rest_of_block(dev);
++ }
++ }
++ return YAFFS_OK;
++}
++
++int yaffs_summary_fetch(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_block)
++{
++ struct yaffs_packed_tags2_tags_only tags_only;
++ struct yaffs_summary_tags *sum_tags;
++ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
++ sum_tags = &dev->sum_tags[chunk_in_block];
++ tags_only.chunk_id = sum_tags->chunk_id;
++ tags_only.n_bytes = sum_tags->n_bytes;
++ tags_only.obj_id = sum_tags->obj_id;
++ yaffs_unpack_tags2_tags_only(tags, &tags_only);
++ return YAFFS_OK;
++ }
++ return YAFFS_FAIL;
++}
++
++void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
++{
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++ int i;
++
++ if (!bi->has_summary)
++ return;
++
++ for (i = dev->chunks_per_summary;
++ i < dev->param.chunks_per_block;
++ i++) {
++ if (yaffs_check_chunk_bit(dev, blk, i)) {
++ yaffs_clear_chunk_bit(dev, blk, i);
++ bi->pages_in_use--;
++ dev->n_free_chunks++;
++ }
++ }
++}
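++
++/*
++ * Illustrative write-path sketch (hypothetical caller, assumed flow;
++ * not part of this file): the allocator calls yaffs_summary_add() for
++ * every data chunk it writes, and writing the last data chunk
++ * (chunks_per_summary - 1) triggers the summary flush above.
++ */
++#if 0
++static void example_fill_block(struct yaffs_dev *dev, int blk)
++{
++	struct yaffs_ext_tags tags;
++	int c;
++
++	for (c = 0; c < dev->chunks_per_summary; c++) {
++		/* ... write data chunk c and fill in tags ... */
++		yaffs_summary_add(dev, &tags,
++				  blk * dev->param.chunks_per_block + c);
++	}
++}
++#endif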
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_summary.h linux-3.15-rc5/fs/yaffs2/yaffs_summary.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_summary.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_summary.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,37 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_SUMMARY_H__
++#define __YAFFS_SUMMARY_H__
++
++#include "yaffs_packedtags2.h"
++
++
++int yaffs_summary_init(struct yaffs_dev *dev);
++void yaffs_summary_deinit(struct yaffs_dev *dev);
++
++int yaffs_summary_add(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++			int chunk_in_nand);
++int yaffs_summary_fetch(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_block);
++int yaffs_summary_read(struct yaffs_dev *dev,
++ struct yaffs_summary_tags *st,
++ int blk);
++void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
++
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagscompat.c linux-3.15-rc5/fs/yaffs2/yaffs_tagscompat.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagscompat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_tagscompat.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,381 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_ecc.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_trace.h"
++
++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
++
++
++/********** Tags ECC calculations *********/
++
++
++void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
++{
++ /* Calculate an ecc */
++ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
++ unsigned i, j;
++ unsigned ecc = 0;
++ unsigned bit = 0;
++
++ tags->ecc = 0;
++
++ for (i = 0; i < 8; i++) {
++ for (j = 1; j & 0xff; j <<= 1) {
++ bit++;
++ if (b[i] & j)
++ ecc ^= bit;
++ }
++ }
++ tags->ecc = ecc;
++}
++
++int yaffs_check_tags_ecc(struct yaffs_tags *tags)
++{
++ unsigned ecc = tags->ecc;
++
++ yaffs_calc_tags_ecc(tags);
++
++ ecc ^= tags->ecc;
++
++ if (ecc && ecc <= 64) {
++ /* TODO: Handle the failure better. Retire? */
++ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
++
++ ecc--;
++
++ b[ecc / 8] ^= (1 << (ecc & 7));
++
++		/* Now recalculate the ecc */
++ yaffs_calc_tags_ecc(tags);
++
++ return 1; /* recovered error */
++ } else if (ecc) {
++		/* Weird ecc failure value */
++		/* TODO: Need to do something here */
++ return -1; /* unrecovered error */
++ }
++ return 0;
++}
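++
++/*
++ * Illustrative self-test sketch (not wired in anywhere): flip a single
++ * bit in an 8-byte tags image and confirm yaffs_check_tags_ecc()
++ * repairs it. Assumes the union yaffs_tags_union layout from
++ * yaffs_guts.h.
++ */
++#if 0
++static int yaffs_tags_ecc_selftest(void)
++{
++	union yaffs_tags_union tu;
++
++	memset(&tu, 0x5a, sizeof(tu));
++	yaffs_calc_tags_ecc(&tu.as_tags);
++	tu.as_bytes[3] ^= 0x10;	/* single-bit corruption */
++	return yaffs_check_tags_ecc(&tu.as_tags) == 1; /* 1 => recovered */
++}
++#endif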
++
++/********** Tags **********/
++
++static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
++ struct yaffs_tags *tags_ptr)
++{
++ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
++
++ yaffs_calc_tags_ecc(tags_ptr);
++
++ spare_ptr->tb0 = tu->as_bytes[0];
++ spare_ptr->tb1 = tu->as_bytes[1];
++ spare_ptr->tb2 = tu->as_bytes[2];
++ spare_ptr->tb3 = tu->as_bytes[3];
++ spare_ptr->tb4 = tu->as_bytes[4];
++ spare_ptr->tb5 = tu->as_bytes[5];
++ spare_ptr->tb6 = tu->as_bytes[6];
++ spare_ptr->tb7 = tu->as_bytes[7];
++}
++
++static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
++ struct yaffs_spare *spare_ptr,
++ struct yaffs_tags *tags_ptr)
++{
++ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
++ int result;
++
++ tu->as_bytes[0] = spare_ptr->tb0;
++ tu->as_bytes[1] = spare_ptr->tb1;
++ tu->as_bytes[2] = spare_ptr->tb2;
++ tu->as_bytes[3] = spare_ptr->tb3;
++ tu->as_bytes[4] = spare_ptr->tb4;
++ tu->as_bytes[5] = spare_ptr->tb5;
++ tu->as_bytes[6] = spare_ptr->tb6;
++ tu->as_bytes[7] = spare_ptr->tb7;
++
++ result = yaffs_check_tags_ecc(tags_ptr);
++ if (result > 0)
++ dev->n_tags_ecc_fixed++;
++ else if (result < 0)
++ dev->n_tags_ecc_unfixed++;
++}
++
++static void yaffs_spare_init(struct yaffs_spare *spare)
++{
++ memset(spare, 0xff, sizeof(struct yaffs_spare));
++}
++
++static int yaffs_wr_nand(struct yaffs_dev *dev,
++ int nand_chunk, const u8 *data,
++ struct yaffs_spare *spare)
++{
++ int data_size = dev->data_bytes_per_chunk;
++
++ return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
++ data, data_size,
++ (u8 *) spare, sizeof(*spare));
++}
++
++static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
++ int nand_chunk,
++ u8 *data,
++ struct yaffs_spare *spare,
++ enum yaffs_ecc_result *ecc_result,
++ int correct_errors)
++{
++ int ret_val;
++ struct yaffs_spare local_spare;
++ int data_size;
++ int spare_size;
++ int ecc_result1, ecc_result2;
++ u8 calc_ecc[3];
++
++ if (!spare) {
++ /* If we don't have a real spare, then we use a local one. */
++ /* Need this for the calculation of the ecc */
++ spare = &local_spare;
++ }
++ data_size = dev->data_bytes_per_chunk;
++ spare_size = sizeof(struct yaffs_spare);
++
++ if (dev->param.use_nand_ecc)
++ return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, data_size,
++ (u8 *) spare, spare_size,
++ ecc_result);
++
++
++ /* Handle the ECC at this level. */
++
++ ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, data_size,
++ (u8 *)spare, spare_size,
++ NULL);
++ if (!data || !correct_errors)
++ return ret_val;
++
++ /* Do ECC correction if needed. */
++ yaffs_ecc_calc(data, calc_ecc);
++ ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
++ yaffs_ecc_calc(&data[256], calc_ecc);
++ ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
++
++ if (ecc_result1 > 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error fix performed on chunk %d:0",
++ nand_chunk);
++ dev->n_ecc_fixed++;
++ } else if (ecc_result1 < 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error unfixed on chunk %d:0",
++ nand_chunk);
++ dev->n_ecc_unfixed++;
++ }
++
++ if (ecc_result2 > 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error fix performed on chunk %d:1",
++ nand_chunk);
++ dev->n_ecc_fixed++;
++ } else if (ecc_result2 < 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error unfixed on chunk %d:1",
++ nand_chunk);
++ dev->n_ecc_unfixed++;
++ }
++
++ if (ecc_result1 || ecc_result2) {
++ /* We had a data problem on this page */
++ yaffs_handle_rd_data_error(dev, nand_chunk);
++ }
++
++ if (ecc_result1 < 0 || ecc_result2 < 0)
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ else if (ecc_result1 > 0 || ecc_result2 > 0)
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
++ else
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++
++ return ret_val;
++}
++
++/*
++ * Functions for robustness handling
++ */
++
++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
++{
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
++
++ /* Mark the block for retirement */
++ yaffs_get_block_info(dev, flash_block + dev->block_offset)->
++ needs_retiring = 1;
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>>Block %d marked for retirement",
++ flash_block);
++
++ /* TODO:
++ * Just do a garbage collection on the affected block
++ * then retire the block
++ * NB recursion
++ */
++}
++
++static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *data, const struct yaffs_ext_tags *ext_tags)
++{
++ struct yaffs_spare spare;
++ struct yaffs_tags tags;
++
++ yaffs_spare_init(&spare);
++
++ if (ext_tags->is_deleted)
++ spare.page_status = 0;
++ else {
++ tags.obj_id = ext_tags->obj_id;
++ tags.chunk_id = ext_tags->chunk_id;
++
++ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
++
++ if (dev->data_bytes_per_chunk >= 1024)
++ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
++ else
++ tags.n_bytes_msb = 3;
++
++ tags.serial_number = ext_tags->serial_number;
++
++ if (!dev->param.use_nand_ecc && data) {
++ yaffs_ecc_calc(data, spare.ecc1);
++ yaffs_ecc_calc(&data[256], spare.ecc2);
++ }
++
++ yaffs_load_tags_to_spare(&spare, &tags);
++ }
++ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
++}
++
++static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
++ int nand_chunk,
++ u8 *data, struct yaffs_ext_tags *ext_tags)
++{
++ struct yaffs_spare spare;
++ struct yaffs_tags tags;
++ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
++ static struct yaffs_spare spare_ff;
++ static int init;
++ int deleted;
++
++ if (!init) {
++ memset(&spare_ff, 0xff, sizeof(spare_ff));
++ init = 1;
++ }
++
++ if (!yaffs_rd_chunk_nand(dev, nand_chunk,
++ data, &spare, &ecc_result, 1))
++ return YAFFS_FAIL;
++
++ /* ext_tags may be NULL */
++ if (!ext_tags)
++ return YAFFS_OK;
++
++ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
++
++ ext_tags->is_deleted = deleted;
++ ext_tags->ecc_result = ecc_result;
++ ext_tags->block_bad = 0; /* We're reading it */
++ /* therefore it is not a bad block */
++ ext_tags->chunk_used =
++ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
++
++ if (ext_tags->chunk_used) {
++ yaffs_get_tags_from_spare(dev, &spare, &tags);
++ ext_tags->obj_id = tags.obj_id;
++ ext_tags->chunk_id = tags.chunk_id;
++ ext_tags->n_bytes = tags.n_bytes_lsb;
++
++ if (dev->data_bytes_per_chunk >= 1024)
++ ext_tags->n_bytes |=
++ (((unsigned)tags.n_bytes_msb) << 10);
++
++ ext_tags->serial_number = tags.serial_number;
++ }
++
++ return YAFFS_OK;
++}
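++
++/*
++ * Worked example of the n_bytes split used above (illustrative): for
++ * n_bytes = 1536 on a chunk of at least 1024 data bytes,
++ * n_bytes_lsb = 1536 & 1023 = 512 and n_bytes_msb = 1536 >> 10 = 1;
++ * the read side rebuilds 512 | (1 << 10) = 1536.
++ */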
++
++static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
++{
++ struct yaffs_spare spare;
++
++ memset(&spare, 0xff, sizeof(struct yaffs_spare));
++
++ spare.block_status = 'Y';
++
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
++ &spare);
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
++ NULL, &spare);
++
++ return YAFFS_OK;
++}
++
++static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number)
++{
++ struct yaffs_spare spare0, spare1;
++ static struct yaffs_spare spare_ff;
++ static int init;
++ enum yaffs_ecc_result dummy;
++
++ if (!init) {
++ memset(&spare_ff, 0xff, sizeof(spare_ff));
++ init = 1;
++ }
++
++ *seq_number = 0;
++
++ /* Look for bad block markers in the first two chunks */
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
++ NULL, &spare0, &dummy, 0);
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
++ NULL, &spare1, &dummy, 0);
++
++ if (hweight8(spare0.block_status & spare1.block_status) < 7)
++ *state = YAFFS_BLOCK_STATE_DEAD;
++ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
++ *state = YAFFS_BLOCK_STATE_EMPTY;
++ else
++ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
++
++ return YAFFS_OK;
++}
++
++void yaffs_tags_compat_install(struct yaffs_dev *dev)
++{
++	if (dev->param.is_yaffs2)
++		return;
++	if (!dev->tagger.write_chunk_tags_fn)
++		dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
++	if (!dev->tagger.read_chunk_tags_fn)
++		dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
++	if (!dev->tagger.query_block_fn)
++		dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
++	if (!dev->tagger.mark_bad_fn)
++		dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagscompat.h linux-3.15-rc5/fs/yaffs2/yaffs_tagscompat.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagscompat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_tagscompat.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSCOMPAT_H__
++#define __YAFFS_TAGSCOMPAT_H__
++
++
++#include "yaffs_guts.h"
++
++#if 0
++
++
++int yaffs_tags_compat_wr(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *data, const struct yaffs_ext_tags *tags);
++int yaffs_tags_compat_rd(struct yaffs_dev *dev,
++ int nand_chunk,
++ u8 *data, struct yaffs_ext_tags *tags);
++int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
++int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number);
++
++#endif
++
++
++void yaffs_tags_compat_install(struct yaffs_dev *dev);
++void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
++int yaffs_check_tags_ecc(struct yaffs_tags *tags);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagsmarshall.c linux-3.15-rc5/fs/yaffs2/yaffs_tagsmarshall.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagsmarshall.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_tagsmarshall.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,199 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_packedtags2.h"
++
++static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
++ int nand_chunk, const u8 *data,
++ const struct yaffs_ext_tags *tags)
++{
++ struct yaffs_packed_tags2 pt;
++ int retval;
++
++ int packed_tags_size =
++ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void *packed_tags_ptr =
++ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "yaffs_tags_marshall_write chunk %d data %p tags %p",
++ nand_chunk, data, tags);
++
++ /* For yaffs2 writing there must be both data and tags.
++ * If we're using inband tags, then the tags are stuffed into
++ * the end of the data buffer.
++ */
++ if (!data || !tags)
++ BUG();
++ else if (dev->param.inband_tags) {
++ struct yaffs_packed_tags2_tags_only *pt2tp;
++		pt2tp = (struct yaffs_packed_tags2_tags_only *)
++				(data + dev->data_bytes_per_chunk);
++ yaffs_pack_tags2_tags_only(pt2tp, tags);
++ } else {
++ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
++ }
++
++ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
++ data, dev->param.total_bytes_per_chunk,
++ (dev->param.inband_tags) ? NULL : packed_tags_ptr,
++ (dev->param.inband_tags) ? 0 : packed_tags_size);
++
++ return retval;
++}
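++
++/*
++ * Inband-tags layout used above (illustrative): the packed tags ride at
++ * the end of the data buffer, so for a hypothetical 2048-byte data area
++ * the buffer spans total_bytes_per_chunk and the tags start at
++ * data + 2048:
++ *
++ * [ 0 .. 2047 : file data ][ 2048 .. : yaffs_packed_tags2_tags_only ]
++ */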
++
++static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
++ int nand_chunk, u8 *data,
++ struct yaffs_ext_tags *tags)
++{
++ int retval = 0;
++ int local_data = 0;
++ u8 spare_buffer[100];
++ enum yaffs_ecc_result ecc_result;
++
++ struct yaffs_packed_tags2 pt;
++
++ int packed_tags_size =
++ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void *packed_tags_ptr =
++ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "yaffs_tags_marshall_read chunk %d data %p tags %p",
++ nand_chunk, data, tags);
++
++ if (dev->param.inband_tags) {
++ if (!data) {
++ local_data = 1;
++ data = yaffs_get_temp_buffer(dev);
++ }
++ }
++
++ if (dev->param.inband_tags || (data && !tags))
++ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, dev->param.total_bytes_per_chunk,
++ NULL, 0,
++ &ecc_result);
++ else if (tags)
++ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, dev->param.total_bytes_per_chunk,
++ spare_buffer, packed_tags_size,
++ &ecc_result);
++ else
++ BUG();
++
++
++ if (dev->param.inband_tags) {
++ if (tags) {
++ struct yaffs_packed_tags2_tags_only *pt2tp;
++ pt2tp =
++ (struct yaffs_packed_tags2_tags_only *)
++ &data[dev->data_bytes_per_chunk];
++ yaffs_unpack_tags2_tags_only(tags, pt2tp);
++ }
++ } else if (tags) {
++ memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
++ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
++ }
++
++ if (local_data)
++ yaffs_release_temp_buffer(dev, data);
++
++ if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
++ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ dev->n_ecc_unfixed++;
++ }
++
++	if (tags && ecc_result == YAFFS_ECC_RESULT_FIXED) {
++ if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
++ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
++ dev->n_ecc_fixed++;
++ }
++
++ if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
++ return YAFFS_OK;
++ else
++ return YAFFS_FAIL;
++}
++
++static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number)
++{
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
++ block_no);
++
++ retval = dev->drv.drv_check_bad_fn(dev, block_no);
++
++	if (retval == YAFFS_FAIL) {
++ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
++
++ *state = YAFFS_BLOCK_STATE_DEAD;
++ *seq_number = 0;
++ } else {
++ struct yaffs_ext_tags t;
++
++ yaffs_tags_marshall_read(dev,
++ block_no * dev->param.chunks_per_block,
++ NULL, &t);
++
++ if (t.chunk_used) {
++ *seq_number = t.seq_number;
++ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
++ } else {
++ *seq_number = 0;
++ *state = YAFFS_BLOCK_STATE_EMPTY;
++ }
++ }
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "block query returns seq %d state %d",
++ *seq_number, *state);
++
++ if (retval == 0)
++ return YAFFS_OK;
++ else
++ return YAFFS_FAIL;
++}
++
++static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++ return dev->drv.drv_mark_bad_fn(dev, block_no);
++
++}
++
++
++void yaffs_tags_marshall_install(struct yaffs_dev *dev)
++{
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (!dev->tagger.write_chunk_tags_fn)
++ dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
++
++ if (!dev->tagger.read_chunk_tags_fn)
++ dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
++
++ if (!dev->tagger.query_block_fn)
++ dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
++
++ if (!dev->tagger.mark_bad_fn)
++ dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
++
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagsmarshall.h linux-3.15-rc5/fs/yaffs2/yaffs_tagsmarshall.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_tagsmarshall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_tagsmarshall.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSMARSHALL_H__
++#define __YAFFS_TAGSMARSHALL_H__
++
++#include "yaffs_guts.h"
++void yaffs_tags_marshall_install(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_trace.h linux-3.15-rc5/fs/yaffs2/yaffs_trace.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_trace.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_trace.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,57 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YTRACE_H__
++#define __YTRACE_H__
++
++extern unsigned int yaffs_trace_mask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++
++#define YAFFS_TRACE_OS 0x00000002
++#define YAFFS_TRACE_ALLOCATE 0x00000004
++#define YAFFS_TRACE_SCAN 0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
++#define YAFFS_TRACE_ERASE 0x00000020
++#define YAFFS_TRACE_GC 0x00000040
++#define YAFFS_TRACE_WRITE 0x00000080
++#define YAFFS_TRACE_TRACING 0x00000100
++#define YAFFS_TRACE_DELETION 0x00000200
++#define YAFFS_TRACE_BUFFERS 0x00000400
++#define YAFFS_TRACE_NANDACCESS 0x00000800
++#define YAFFS_TRACE_GC_DETAIL 0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
++#define YAFFS_TRACE_MTD 0x00004000
++#define YAFFS_TRACE_CHECKPOINT 0x00008000
++
++#define YAFFS_TRACE_VERIFY 0x00010000
++#define YAFFS_TRACE_VERIFY_NAND 0x00020000
++#define YAFFS_TRACE_VERIFY_FULL 0x00040000
++#define YAFFS_TRACE_VERIFY_ALL 0x000f0000
++
++#define YAFFS_TRACE_SYNC 0x00100000
++#define YAFFS_TRACE_BACKGROUND 0x00200000
++#define YAFFS_TRACE_LOCK 0x00400000
++#define YAFFS_TRACE_MOUNT 0x00800000
++
++#define YAFFS_TRACE_ERROR 0x40000000
++#define YAFFS_TRACE_BUG 0x80000000
++#define YAFFS_TRACE_ALWAYS 0xf0000000
++
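++/*
++ * Example (illustrative): a mask for debugging garbage collection that
++ * keeps the always-traced flags:
++ *
++ *	yaffs_trace_mask = YAFFS_TRACE_GC | YAFFS_TRACE_GC_DETAIL |
++ *			   YAFFS_TRACE_ALWAYS;
++ */
++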
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_verify.c linux-3.15-rc5/fs/yaffs2/yaffs_verify.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_verify.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_verify.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,529 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_verify.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++int yaffs_skip_verification(struct yaffs_dev *dev)
++{
++ (void) dev;
++ return !(yaffs_trace_mask &
++ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_full_verification(struct yaffs_dev *dev)
++{
++ (void) dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
++{
++ (void) dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++static const char * const block_state_name[] = {
++ "Unknown",
++ "Needs scan",
++ "Scanning",
++ "Empty",
++ "Allocating",
++ "Full",
++ "Dirty",
++ "Checkpoint",
++ "Collecting",
++ "Dead"
++};
++
++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
++{
++ int actually_used;
++ int in_use;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Report illegal runtime states */
++ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has undefined state %d",
++ n, bi->block_state);
++
++ switch (bi->block_state) {
++ case YAFFS_BLOCK_STATE_UNKNOWN:
++ case YAFFS_BLOCK_STATE_SCANNING:
++ case YAFFS_BLOCK_STATE_NEEDS_SCAN:
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has bad run-state %s",
++ n, block_state_name[bi->block_state]);
++ }
++
++ /* Check pages in use and soft deletions are legal */
++
++ actually_used = bi->pages_in_use - bi->soft_del_pages;
++
++ if (bi->pages_in_use < 0 ||
++ bi->pages_in_use > dev->param.chunks_per_block ||
++ bi->soft_del_pages < 0 ||
++ bi->soft_del_pages > dev->param.chunks_per_block ||
++ actually_used < 0 || actually_used > dev->param.chunks_per_block)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Block %d has illegal values pages_in_use %d soft_del_pages %d",
++ n, bi->pages_in_use, bi->soft_del_pages);
++
++ /* Check chunk bitmap legal */
++ in_use = yaffs_count_chunk_bits(dev, n);
++ if (in_use != bi->pages_in_use)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
++ n, bi->pages_in_use, in_use);
++}
++
++void yaffs_verify_collected_blk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi, int n)
++{
++ yaffs_verify_blk(dev, bi, n);
++
++ /* After collection the block should be in the erased state */
++
++ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
++ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Block %d is in state %d after gc, should be erased",
++ n, bi->block_state);
++ }
++}
++
++void yaffs_verify_blocks(struct yaffs_dev *dev)
++{
++ int i;
++ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
++ int illegal_states = 0;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ memset(state_count, 0, sizeof(state_count));
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
++ yaffs_verify_blk(dev, bi, i);
++
++ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
++ state_count[bi->block_state]++;
++ else
++ illegal_states++;
++ }
++
++ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
++
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "%d blocks have illegal states",
++ illegal_states);
++ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Too many allocating blocks");
++
++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "%s %d blocks",
++ block_state_name[i], state_count[i]);
++
++ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Checkpoint block count wrong dev %d count %d",
++ dev->blocks_in_checkpt,
++ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
++
++ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Erased block count wrong dev %d count %d",
++ dev->n_erased_blocks,
++ state_count[YAFFS_BLOCK_STATE_EMPTY]);
++
++ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Too many collecting blocks %d (max is 1)",
++ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in
++ * which case those tests will not be performed.
++ */
++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
++ struct yaffs_ext_tags *tags, int parent_check)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!(tags && obj && oh)) {
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Verifying object header tags %p obj %p oh %p",
++ tags, obj, oh);
++ return;
++ }
++
++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++ oh->type > YAFFS_OBJECT_TYPE_MAX)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header type is illegal value 0x%x",
++ tags->obj_id, oh->type);
++
++ if (tags->obj_id != obj->obj_id)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header mismatch obj_id %d",
++ tags->obj_id, obj->obj_id);
++
++ /*
++ * Check that the object's parent ids match if parent_check requested.
++ *
++ * Tests do not apply to the root object.
++ */
++
++ if (parent_check && tags->obj_id > 1 && !obj->parent)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header mismatch parent_id %d obj->parent is NULL",
++ tags->obj_id, oh->parent_obj_id);
++
++ if (parent_check && obj->parent &&
++ oh->parent_obj_id != obj->parent->obj_id &&
++ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header mismatch parent_id %d parent_obj_id %d",
++ tags->obj_id, oh->parent_obj_id,
++ obj->parent->obj_id);
++
++ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header name is NULL",
++ obj->obj_id);
++
++ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header name is 0xff",
++ obj->obj_id);
++}
++
++void yaffs_verify_file(struct yaffs_obj *obj)
++{
++ u32 x;
++ int required_depth;
++ int actual_depth;
++ int last_chunk;
++ u32 offset_in_chunk;
++ u32 the_chunk;
++
++ u32 i;
++ struct yaffs_dev *dev;
++ struct yaffs_ext_tags tags;
++ struct yaffs_tnode *tn;
++ u32 obj_id;
++
++ if (!obj)
++ return;
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ dev = obj->my_dev;
++ obj_id = obj->obj_id;
++
++
++ /* Check file size is consistent with tnode depth */
++ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
++ &last_chunk, &offset_in_chunk);
++ last_chunk++;
++ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
++ required_depth = 0;
++ while (x > 0) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ required_depth++;
++ }
++
++ actual_depth = obj->variant.file_variant.top_level;
++
++ /* Check that the chunks in the tnode tree are all correct.
++ * We do this by scanning through the tnode tree and
++ * checking the tags for every chunk match.
++ */
++
++ if (yaffs_skip_nand_verification(dev))
++ return;
++
++ for (i = 1; i <= last_chunk; i++) {
++ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
++
++ if (!tn)
++ continue;
++
++ the_chunk = yaffs_get_group_base(dev, tn, i);
++ if (the_chunk > 0) {
++ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
++ &tags);
++ if (tags.obj_id != obj_id || tags.chunk_id != i)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
++ obj_id, i, the_chunk,
++ tags.obj_id, tags.chunk_id);
++ }
++ }
++}
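++
++/*
++ * Worked example of the depth computation above (illustrative, assuming
++ * the usual YAFFS_TNODES_LEVEL0_BITS = 4 and
++ * YAFFS_TNODES_INTERNAL_BITS = 3): for last_chunk = 5001 after the
++ * increment, x = 5001 >> 4 = 312, then 312 >> 3 = 39, 39 >> 3 = 4,
++ * 4 >> 3 = 0, so required_depth = 3.
++ */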
++
++void yaffs_verify_link(struct yaffs_obj *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify sane equivalent object */
++}
++
++void yaffs_verify_symlink(struct yaffs_obj *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify symlink string */
++}
++
++void yaffs_verify_special(struct yaffs_obj *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++}
++
++void yaffs_verify_obj(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev;
++ u32 chunk_min;
++ u32 chunk_max;
++ u32 chunk_id_ok;
++ u32 chunk_in_range;
++ u32 chunk_wrongly_deleted;
++ u32 chunk_valid;
++
++ if (!obj)
++ return;
++
++ if (obj->being_created)
++ return;
++
++ dev = obj->my_dev;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Check sane object header chunk */
++
++ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
++ chunk_max =
++ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
++
++ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
++ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
++ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
++ chunk_valid = chunk_in_range &&
++ yaffs_check_chunk_bit(dev,
++ obj->hdr_chunk / dev->param.chunks_per_block,
++ obj->hdr_chunk % dev->param.chunks_per_block);
++ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
++
++ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d has chunk_id %d %s %s",
++ obj->obj_id, obj->hdr_chunk,
++ chunk_id_ok ? "" : ",out of range",
++ chunk_wrongly_deleted ? ",marked as deleted" : "");
++
++ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
++ struct yaffs_ext_tags tags;
++ struct yaffs_obj_hdr *oh;
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++
++ oh = (struct yaffs_obj_hdr *)buffer;
++
++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
++
++ yaffs_verify_oh(obj, oh, &tags, 1);
++
++ yaffs_release_temp_buffer(dev, buffer);
++ }
++
++ /* Verify it has a parent */
++ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d has parent pointer %p which does not look like an object",
++ obj->obj_id, obj->parent);
++ }
++
++ /* Verify parent is a directory */
++ if (obj->parent &&
++ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d's parent is not a directory (type %d)",
++ obj->obj_id, obj->parent->variant_type);
++ }
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ yaffs_verify_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ yaffs_verify_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ yaffs_verify_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ yaffs_verify_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ yaffs_verify_special(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++			"Obj %d has illegal type %d",
++ obj->obj_id, obj->variant_type);
++ break;
++ }
++}
++
++void yaffs_verify_objects(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ int i;
++ struct list_head *lh;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Iterate through the objects in each hash entry */
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ list_for_each(lh, &dev->obj_bucket[i].list) {
++ obj = list_entry(lh, struct yaffs_obj, hash_link);
++ yaffs_verify_obj(obj);
++ }
++ }
++}
++
++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
++{
++ struct list_head *lh;
++ struct yaffs_obj *list_obj;
++ int count = 0;
++
++ if (!obj) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
++ BUG();
++ return;
++ }
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!obj->parent) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
++ BUG();
++ return;
++ }
++
++ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
++ BUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
++ list_obj = list_entry(lh, struct yaffs_obj, siblings);
++ yaffs_verify_obj(list_obj);
++ if (obj == list_obj)
++ count++;
++ }
++
++ if (count != 1) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Object in directory %d times",
++ count);
++ BUG();
++ }
++}
++
++void yaffs_verify_dir(struct yaffs_obj *directory)
++{
++ struct list_head *lh;
++ struct yaffs_obj *list_obj;
++
++ if (!directory) {
++ BUG();
++ return;
++ }
++
++ if (yaffs_skip_full_verification(directory->my_dev))
++ return;
++
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Directory has wrong type: %d",
++ directory->variant_type);
++ BUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ list_for_each(lh, &directory->variant.dir_variant.children) {
++ list_obj = list_entry(lh, struct yaffs_obj, siblings);
++ if (list_obj->parent != directory) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Object in directory list has wrong parent %p",
++ list_obj->parent);
++ BUG();
++ }
++ yaffs_verify_obj_in_dir(list_obj);
++ }
++}
++
++static int yaffs_free_verification_failures;
++
++void yaffs_verify_free_chunks(struct yaffs_dev *dev)
++{
++ int counted;
++ int difference;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ counted = yaffs_count_free_chunks(dev);
++
++ difference = dev->n_free_chunks - counted;
++
++ if (difference) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++			"Free chunks verification failure %d %d %d",
++ dev->n_free_chunks, counted, difference);
++ yaffs_free_verification_failures++;
++ }
++}
++
++int yaffs_verify_file_sane(struct yaffs_obj *in)
++{
++ (void) in;
++ return YAFFS_OK;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_verify.h linux-3.15-rc5/fs/yaffs2/yaffs_verify.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_verify.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_verify.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,43 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_VERIFY_H__
++#define __YAFFS_VERIFY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
++ int n);
++void yaffs_verify_collected_blk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi, int n);
++void yaffs_verify_blocks(struct yaffs_dev *dev);
++
++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
++ struct yaffs_ext_tags *tags, int parent_check);
++void yaffs_verify_file(struct yaffs_obj *obj);
++void yaffs_verify_link(struct yaffs_obj *obj);
++void yaffs_verify_symlink(struct yaffs_obj *obj);
++void yaffs_verify_special(struct yaffs_obj *obj);
++void yaffs_verify_obj(struct yaffs_obj *obj);
++void yaffs_verify_objects(struct yaffs_dev *dev);
++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
++void yaffs_verify_dir(struct yaffs_obj *directory);
++void yaffs_verify_free_chunks(struct yaffs_dev *dev);
++
++int yaffs_verify_file_sane(struct yaffs_obj *obj);
++
++int yaffs_skip_verification(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_vfs.c linux-3.15-rc5/fs/yaffs2/yaffs_vfs.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_vfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_vfs.c 2014-05-17 02:52:54.000000000 +0200
+@@ -0,0 +1,3604 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes:
++ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
++ * this superblock
++ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
++ * superblock
++ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
++ */
++
++/*
++ * There are two variants of the VFS glue code. This variant should compile
++ * for any version of Linux.
++ */
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
++#define YAFFS_COMPILE_BACKGROUND
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
++#define YAFFS_COMPILE_FREEZER
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
++#define YAFFS_COMPILE_EXPORTFS
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++#define YAFFS_USE_SETATTR_COPY
++#define YAFFS_USE_TRUNCATE_SETSIZE
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++#define YAFFS_HAS_EVICT_INODE
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++#define YAFFS_NEW_FOLLOW_LINK 1
++#else
++#define YAFFS_NEW_FOLLOW_LINK 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define YAFFS_HAS_WRITE_SUPER
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
++#include <linux/smp_lock.h>
++#endif
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++#include <linux/namei.h>
++#endif
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++#include <linux/exportfs.h>
++#endif
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#endif
++#ifdef YAFFS_COMPILE_FREEZER
++#include <linux/freezer.h>
++#endif
++
++#include <asm/div64.h>
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++#include <linux/statfs.h>
++
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define BDEVNAME_SIZE 0
++#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT (&proc_root)
++#else
++#define YPROC_ROOT NULL
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define Y_INIT_TIMER(a) init_timer(a)
++#else
++#define Y_INIT_TIMER(a) init_timer_on_stack(a)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define YAFFS_SUPER_HAS_DIRTY
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
++#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++ uint64_t result = partition_size;
++ do_div(result, block_size);
++ return (uint32_t) result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
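++
++/*
++ * Example (illustrative): a 128 MiB partition with 128 KiB erase blocks
++ * gives YCALCBLOCKS(134217728, 131072) = 1024. do_div() is used so the
++ * 64-bit division also builds on 32-bit kernels.
++ */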
++
++#include <linux/uaccess.h>
++#include <linux/mtd/mtd.h>
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++#include "yaffs_attribs.h"
++
++#include "yaffs_linux.h"
++
++#include "yaffs_mtdif.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_getblockinfo.h"
++
++unsigned int yaffs_trace_mask =
++ YAFFS_TRACE_BAD_BLOCKS |
++ YAFFS_TRACE_ALWAYS |
++ 0;
++
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
++unsigned int yaffs_gc_control = 1;
++unsigned int yaffs_bg_enable = 1;
++unsigned int yaffs_auto_select = 1;
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_trace_mask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++module_param(yaffs_gc_control, uint, 0644);
++module_param(yaffs_bg_enable, uint, 0644);
++#else
++MODULE_PARM(yaffs_trace_mask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++MODULE_PARM(yaffs_gc_control, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++
++#else
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
++#else
++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
++#endif
++
++#define yaffs_inode_to_obj(iptr) \
++ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
++#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
++#else
++#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#define Y_CLEAR_INODE(i) clear_inode(i)
++#else
++#define Y_CLEAR_INODE(i) end_writeback(i)
++#endif
++
++
++#define update_dir_time(dir) do {\
++ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
++ } while (0)
++
++static void yaffs_fill_inode_from_obj(struct inode *inode,
++ struct yaffs_obj *obj);
++
++
++static void yaffs_gross_lock(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
++ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
++}
++
++static void yaffs_gross_unlock(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
++ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
++}
++
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++ /* Lifted from jffs2 */
++
++ struct yaffs_obj *obj;
++ unsigned char *pg_buf;
++ int ret;
++ loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT;
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readpage_nolock at %lld, size %08x",
++ (long long)pos,
++ (unsigned)PAGE_CACHE_SIZE);
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ BUG_ON(!PageLocked(pg));
++#else
++ if (!PageLocked(pg))
++ PAGE_BUG(pg);
++#endif
++
++ pg_buf = kmap(pg);
++ /* FIXME: Can kmap fail? */
++
++ yaffs_gross_lock(dev);
++
++ ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE);
++
++ yaffs_gross_unlock(dev);
++
++ if (ret >= 0)
++ ret = 0;
++
++ if (ret) {
++ ClearPageUptodate(pg);
++ SetPageError(pg);
++ } else {
++ SetPageUptodate(pg);
++ ClearPageError(pg);
++ }
++
++ flush_dcache_page(pg);
++ kunmap(pg);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
++ return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++ int ret = yaffs_readpage_nolock(f, pg);
++ UnlockPage(pg);
++ return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++ int ret;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
++ ret = yaffs_readpage_unlock(f, pg);
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
++ return ret;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid())
++#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid())
++#else
++#define YCRED_FSUID() YCRED(current)->fsuid
++#define YCRED_FSGID() YCRED(current)->fsgid
++
++static inline uid_t i_uid_read(const struct inode *inode)
++{
++ return inode->i_uid;
++}
++
++static inline gid_t i_gid_read(const struct inode *inode)
++{
++ return inode->i_gid;
++}
++
++static inline void i_uid_write(struct inode *inode, uid_t uid)
++{
++ inode->i_uid = uid;
++}
++
++static inline void i_gid_write(struct inode *inode, gid_t gid)
++{
++ inode->i_gid = gid;
++}
++#endif
++
++static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
++{
++ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
++
++ if (lc)
++ lc->dirty = val;
++
++# ifdef YAFFS_SUPER_HAS_DIRTY
++ {
++ struct super_block *sb = lc->super;
++
++ if (sb)
++ sb->s_dirt = val;
++ }
++#endif
++
++}
++
++static void yaffs_set_super_dirty(struct yaffs_dev *dev)
++{
++ yaffs_set_super_dirty_val(dev, 1);
++}
++
++static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
++{
++ yaffs_set_super_dirty_val(dev, 0);
++}
++
++static int yaffs_check_super_dirty(struct yaffs_dev *dev)
++{
++ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
++
++ if (lc && lc->dirty)
++ return 1;
++
++# ifdef YAFFS_SUPER_HAS_DIRTY
++ {
++ struct super_block *sb = lc->super;
++
++ if (sb && sb->s_dirt)
++ return 1;
++ }
++#endif
++ return 0;
++
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++ struct yaffs_dev *dev;
++ struct address_space *mapping = page->mapping;
++ struct inode *inode;
++ unsigned long end_index;
++ char *buffer;
++ struct yaffs_obj *obj;
++ int n_written = 0;
++ unsigned n_bytes;
++ loff_t i_size;
++
++ if (!mapping)
++ BUG();
++ inode = mapping->host;
++ if (!inode)
++ BUG();
++ i_size = i_size_read(inode);
++
++ end_index = i_size >> PAGE_CACHE_SHIFT;
++
++ if (page->index < end_index)
++ n_bytes = PAGE_CACHE_SIZE;
++ else {
++ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
++
++ if (page->index > end_index || !n_bytes) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_writepage at %lld, inode size = %lld!!",
++ ((loff_t)page->index) << PAGE_CACHE_SHIFT,
++ inode->i_size);
++ yaffs_trace(YAFFS_TRACE_OS,
++ " -> don't care!!");
++
++ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ return 0;
++ }
++ }
++
++ if (n_bytes != PAGE_CACHE_SIZE)
++ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
++
++ get_page(page);
++
++ buffer = kmap(page);
++
++ obj = yaffs_inode_to_obj(inode);
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_writepage at %lld, size %08x",
++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "writepag0: obj = %lld, ino = %lld",
++ obj->variant.file_variant.file_size, inode->i_size);
++
++ n_written = yaffs_wr_file(obj, buffer,
++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0);
++
++ yaffs_set_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "writepag1: obj = %lld, ino = %lld",
++ obj->variant.file_variant.file_size, inode->i_size);
++
++ yaffs_gross_unlock(dev);
++
++ kunmap(page);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ put_page(page);
++
++ return (n_written == n_bytes) ? 0 : -ENOSPC;
++}
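++
++/*
++ * Worked example of the tail-page arithmetic above (illustrative,
++ * assuming 4 KiB pages): i_size = 10000 gives end_index = 2; page
++ * index 2 is the tail page, n_bytes = 10000 & 4095 = 1808, and bytes
++ * 1808..4095 of that page are zeroed before the write.
++ */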
++
++/*
++ * Space holding and freeing is done to ensure we have space available
++ * for write_begin/end.
++ * For now we just assume few parallel writes and check against a small
++ * number.
++ * TODO: need to do this with a counter to handle parallel reads better.
++ */
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++
++ int n_free_chunks;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ n_free_chunks = yaffs_get_n_free_chunks(dev);
++
++ yaffs_gross_unlock(dev);
++
++ return (n_free_chunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_gross_unlock(dev);
++}
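++/*
++ * A counter-based variant of the space hold above (hypothetical sketch
++ * only, not part of yaffs): tracking outstanding reservations would let
++ * several writers hold space at once instead of each re-checking the
++ * same free-chunk count. The yaffs_reserved_chunks counter is invented
++ * for illustration and does not exist in yaffs.
++ */
++#if 0
++static atomic_t yaffs_reserved_chunks = ATOMIC_INIT(0);
++
++static ssize_t yaffs_hold_space_counted(struct file *f)
++{
++	struct yaffs_dev *dev = yaffs_dentry_to_obj(f->f_dentry)->my_dev;
++	int ok = 0;
++
++	yaffs_gross_lock(dev);
++	/* Reserve a chunk only if enough remain after existing holds. */
++	if (yaffs_get_n_free_chunks(dev) -
++	    atomic_read(&yaffs_reserved_chunks) > 20) {
++		atomic_inc(&yaffs_reserved_chunks);
++		ok = 1;
++	}
++	yaffs_gross_unlock(dev);
++	return ok;
++}
++
++static void yaffs_release_space_counted(struct file *f)
++{
++	atomic_dec(&yaffs_reserved_chunks);
++}
++#endif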
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata)
++{
++ struct page *pg = NULL;
++ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++
++ int ret = 0;
++ int space_held = 0;
++
++ /* Get a page */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++ pg = __grab_cache_page(mapping, index);
++#endif
++
++ *pagep = pg;
++ if (!pg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ yaffs_trace(YAFFS_TRACE_OS,
++ "start yaffs_write_begin index %d(%x) uptodate %d",
++ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
++
++ /* Get fs space */
++ space_held = yaffs_hold_space(filp);
++
++ if (!space_held) {
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ /* Update page if required */
++
++ if (!Page_Uptodate(pg))
++ ret = yaffs_readpage_nolock(filp, pg);
++
++ if (ret)
++ goto out;
++
++ /* Happy path return */
++ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
++
++ return 0;
++
++out:
++ yaffs_trace(YAFFS_TRACE_OS,
++ "end yaffs_write_begin fail returning %d", ret);
++ if (space_held)
++ yaffs_release_space(filp);
++ if (pg) {
++ unlock_page(pg);
++ page_cache_release(pg);
++ }
++ return ret;
++}
++
++#else
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++ unsigned offset, unsigned to)
++{
++	yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepare_write");
++
++ if (!Page_Uptodate(pg))
++ return yaffs_readpage_nolock(f, pg);
++ return 0;
++}
++#endif
++
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++ loff_t * pos)
++{
++ struct yaffs_obj *obj;
++ int n_written;
++ loff_t ipos;
++ struct inode *inode;
++ struct yaffs_dev *dev;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ if (!obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write: hey obj is null!");
++ return -EINVAL;
++ }
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ inode = f->f_dentry->d_inode;
++
++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
++ ipos = inode->i_size;
++ else
++ ipos = *pos;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_file_write: about to write %u(%x) bytes to object %d at %lld",
++ (unsigned)n, (unsigned)n, obj->obj_id, ipos);
++
++ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
++
++ yaffs_set_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write: %d(%x) bytes written",
++ (unsigned)n, (unsigned)n);
++
++ if (n_written > 0) {
++ ipos += n_written;
++ *pos = ipos;
++ if (ipos > inode->i_size) {
++ inode->i_size = ipos;
++ inode->i_blocks = (ipos + 511) >> 9;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write size updated to %lld bytes, %d blocks",
++ ipos, (int)(inode->i_blocks));
++ }
++
++ }
++ yaffs_gross_unlock(dev);
++ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
++}
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata)
++{
++ int ret = 0;
++ void *addr, *kva;
++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++ kva = kmap(pg);
++ addr = kva + offset_into_page;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_write_end addr %p pos %lld n_bytes %d",
++ addr, pos, copied);
++
++ ret = yaffs_file_write(filp, addr, copied, &pos);
++
++ if (ret != copied) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_write_end not same size ret %d copied %d",
++ ret, copied);
++ SetPageError(pg);
++ }
++
++ kunmap(pg);
++
++ yaffs_release_space(filp);
++ unlock_page(pg);
++ page_cache_release(pg);
++ return ret;
++}
++#else
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++ unsigned to)
++{
++ void *addr, *kva;
++
++ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++ int n_bytes = to - offset;
++ int n_written;
++
++ kva = kmap(pg);
++ addr = kva + offset;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_commit_write addr %p pos %lld n_bytes %d",
++ addr, pos, n_bytes);
++
++ n_written = yaffs_file_write(f, addr, n_bytes, &pos);
++
++ if (n_written != n_bytes) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_commit_write not same size n_written %d n_bytes %d",
++ n_written, n_bytes);
++ SetPageError(pg);
++ }
++ kunmap(pg);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_commit_write returning %d",
++ n_written == n_bytes ? 0 : n_written);
++
++ return n_written == n_bytes ? 0 : n_written;
++}
++#endif
++
++static struct address_space_operations yaffs_file_address_operations = {
++ .readpage = yaffs_readpage,
++ .writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++ .write_begin = yaffs_write_begin,
++ .write_end = yaffs_write_end,
++#else
++ .prepare_write = yaffs_prepare_write,
++ .commit_write = yaffs_commit_write,
++#endif
++};
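++/*
++ * For orientation, the generic VFS buffered-write path drives the hooks
++ * above roughly like this (simplified sketch, not yaffs-specific code):
++ *
++ *	write(2)
++ *	  -> generic write path
++ *	       -> a_ops->write_begin()	lock the page, hold fs space
++ *	       -> copy user data into the page
++ *	       -> a_ops->write_end()	write through, release the space
++ *
++ * On older kernels the prepare_write/commit_write pair plays the same
++ * roles around the copy.
++ */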
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
++
++ struct yaffs_dev *dev = obj->my_dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_flush object %d (%s)",
++ obj->obj_id,
++ obj->dirty ? "dirty" : "clean");
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_file(obj, 1, 0);
++
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync)
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++ int datasync)
++#endif
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++ struct dentry *dentry = file->f_path.dentry;
++#endif
++
++ obj = yaffs_dentry_to_obj(dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ "yaffs_sync_object");
++ yaffs_gross_lock(dev);
++ yaffs_flush_file(obj, 1, datasync);
++ yaffs_gross_unlock(dev);
++ return 0;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .splice_read = generic_file_splice_read,
++ .splice_write = generic_file_splice_write,
++ .llseek = generic_file_llseek,
++};
++
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .sendfile = generic_file_sendfile,
++};
++
++#else
++
++static const struct file_operations yaffs_file_operations = {
++ .read = generic_file_read,
++ .write = generic_file_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ .sendfile = generic_file_sendfile,
++#endif
++};
++#endif
++
++
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++static void zero_user_segment(struct page *page, unsigned start, unsigned end)
++{
++ void *kaddr = kmap_atomic(page, KM_USER0);
++ memset(kaddr + start, 0, end - start);
++ kunmap_atomic(kaddr, KM_USER0);
++ flush_dcache_page(page);
++}
++#endif
++
++
++static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
++{
++#ifdef YAFFS_USE_TRUNCATE_SETSIZE
++ truncate_setsize(inode, newsize);
++ return 0;
++#else
++ truncate_inode_pages(&inode->i_data, newsize);
++ return 0;
++#endif
++
++}
++
++
++static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
++{
++#ifdef YAFFS_USE_SETATTR_COPY
++ setattr_copy(inode, attr);
++ return 0;
++#else
++ return inode_setattr(inode, attr);
++#endif
++
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_setattr of object %d",
++ yaffs_inode_to_obj(inode)->obj_id);
++#if 0
++ /* Fail if a requested resize >= 2GB */
++ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
++ error = -EINVAL;
++#endif
++
++ if (error == 0)
++ error = inode_change_ok(inode, attr);
++ if (error == 0) {
++ int result;
++ if (!error) {
++ error = yaffs_vfs_setattr(inode, attr);
++ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
++ if (attr->ia_valid & ATTR_SIZE) {
++ yaffs_vfs_setsize(inode, attr->ia_size);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++ }
++ }
++ dev = yaffs_inode_to_obj(inode)->my_dev;
++ if (attr->ia_valid & ATTR_SIZE) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "resize to %d(%x)",
++ (int)(attr->ia_size),
++ (int)(attr->ia_size));
++ }
++ yaffs_gross_lock(dev);
++ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
++ if (result == YAFFS_OK) {
++ error = 0;
++ } else {
++ error = -EPERM;
++ }
++ yaffs_gross_unlock(dev);
++
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
++
++ return error;
++}
++
++static int yaffs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_set_xattrib(obj, name, value, size, flags);
++ if (result == YAFFS_OK)
++ error = 0;
++ else if (result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
++
++ return error;
++}
++
++static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name,
++ void *buff, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_getxattr \"%s\" from object %d",
++ name, obj->obj_id);
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_get_xattrib(obj, name, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
++
++ return error;
++}
++
++static int yaffs_removexattr(struct dentry *dentry, const char *name)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_removexattr of object %d", obj->obj_id);
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_remove_xattrib(obj, name);
++ if (result == YAFFS_OK)
++ error = 0;
++ else if (result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_removexattr done returning %d", error);
++
++ return error;
++}
++
++static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_listxattr of object %d", obj->obj_id);
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_list_xattrib(obj, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_listxattr done returning %d", error);
++
++ return error;
++}
++
++
++static const struct inode_operations yaffs_file_inode_operations = {
++ .setattr = yaffs_setattr,
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++};
++
++
++static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
++ int buflen)
++{
++ unsigned char *alias;
++ int ret;
++
++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++
++ yaffs_gross_unlock(dev);
++
++ if (!alias)
++ return -ENOMEM;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
++ ret = readlink_copy(buffer, buflen, alias);
++#else
++ ret = vfs_readlink(dentry, buffer, buflen, alias);
++#endif
++ kfree(alias);
++ return ret;
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ void *ret;
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++	int ret;
++#endif
++ unsigned char *alias;
++ int ret_int = 0;
++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++ yaffs_gross_unlock(dev);
++
++ if (!alias) {
++ ret_int = -ENOMEM;
++ goto out;
++ }
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ nd_set_link(nd, alias);
++ ret = alias;
++out:
++ if (ret_int)
++ ret = ERR_PTR(ret_int);
++ return ret;
++#else
++ ret = vfs_follow_link(nd, alias);
++ kfree(alias);
++out:
++ if (ret_int)
++ ret = ret_int;
++ return ret;
++#endif
++}
++
++
++#ifdef YAFFS_HAS_PUT_INODE
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++ yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_put_inode: ino %d, count %d",
++		(int)inode->i_ino, atomic_read(&inode->i_count));
++
++}
++#endif
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
++{
++ kfree(alias);
++}
++#endif
++
++static const struct inode_operations yaffs_symlink_inode_operations = {
++ .readlink = yaffs_readlink,
++ .follow_link = yaffs_follow_link,
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ .put_link = yaffs_put_link,
++#endif
++ .setattr = yaffs_setattr,
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++};
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++ struct inode *inode;
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
++
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ yaffs_gross_unlock(dev);
++
++ unlock_new_inode(inode);
++ return inode;
++}
++
++#else
++
++static void yaffs_read_inode(struct inode *inode)
++{
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_inode for %d", (int)inode->i_ino);
++
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_unlock(dev);
++}
++
++#endif
++
++
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++ struct yaffs_obj *obj)
++{
++ struct inode *inode;
++
++ if (!sb) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_get_inode for NULL super_block!!");
++ return NULL;
++
++ }
++
++ if (!obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_get_inode for NULL object!!");
++ return NULL;
++
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_get_inode for object %d", obj->obj_id);
++
++ inode = Y_IGET(sb, obj->obj_id);
++ if (IS_ERR(inode))
++ return NULL;
++
++ /* NB Side effect: iget calls back to yaffs_read_inode(). */
++ /* iget also increments the inode's i_count */
++ /* NB You can't be holding gross_lock or deadlock will happen! */
++
++ return inode;
++}
++
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
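++/*
++ * YCRED_FSUID()/YCRED_FSGID(), used below, are defined elsewhere in
++ * this file (not visible in this hunk). On kernels with the
++ * kuid_t/kgid_t namespace work they would typically expand to
++ * something like from_kuid(&init_user_ns, current_fsuid()); on older
++ * kernels, to the raw fsuid/fsgid taken from YCRED(current).
++ */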
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++ dev_t rdev)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ int rdev)
++#endif
++{
++ struct inode *inode;
++
++ struct yaffs_obj *obj = NULL;
++ struct yaffs_dev *dev;
++
++ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
++
++ int error = -ENOSPC;
++ uid_t uid = YCRED_FSUID();
++ gid_t gid =
++ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
++
++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++ mode |= S_ISGID;
++
++ if (parent) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod: parent object %d type %d",
++ parent->obj_id, parent->variant_type);
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod: could not get parent object");
++ return -EPERM;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++		"yaffs_mknod: making object for %s, mode %x dev %x",
++ dentry->d_name.name, mode, rdev);
++
++ dev = parent->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ switch (mode & S_IFMT) {
++ default:
++ /* Special (socket, fifo, device...) */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ obj =
++ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, old_encode_dev(rdev));
++#else
++ obj =
++ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, rdev);
++#endif
++ break;
++ case S_IFREG: /* file */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
++ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
++ gid);
++ break;
++ case S_IFDIR: /* directory */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
++ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
++ uid, gid);
++ break;
++ case S_IFLNK: /* symlink */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
++ obj = NULL; /* Do we ever get here? */
++ break;
++ }
++
++ /* Can not call yaffs_get_inode() with gross lock held */
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod created object %d count = %d",
++ obj->obj_id, atomic_read(&inode->i_count));
++ error = 0;
++ yaffs_fill_inode_from_obj(dir, parent);
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
++ error = -ENOMEM;
++ }
++
++ return error;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
++#else
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++ int ret_val;
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
++ ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++ return ret_val;
++}
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ bool dummy)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ struct nameidata *n)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++ struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
++ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ unsigned int dummy)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++ struct yaffs_obj *obj;
++ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
++
++ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
++
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_lock(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
++ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
++
++ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
++
++ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
++
++ /* Can't hold gross lock when calling yaffs_get_inode() */
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_lookup found %d", obj->obj_id);
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
++
++ }
++
++/* added NCB for 2.5/6 compatibility - forces the add even if the inode
++ * is NULL, which creates the dentry hash */
++ d_add(dentry, inode);
++
++ return NULL;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry)
++{
++ struct inode *inode = old_dentry->d_inode;
++ struct yaffs_obj *obj = NULL;
++ struct yaffs_obj *link = NULL;
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
++
++ obj = yaffs_inode_to_obj(inode);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
++ link =
++ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
++ obj);
++
++ if (link) {
++ set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
++ d_instantiate(dentry, old_dentry->d_inode);
++ atomic_inc(&old_dentry->d_inode->i_count);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_link link count %d i_count %d",
++ old_dentry->d_inode->i_nlink,
++ atomic_read(&old_dentry->d_inode->i_count));
++ }
++
++ yaffs_gross_unlock(dev);
++
++ if (link) {
++ update_dir_time(dir);
++ return 0;
++ }
++
++ return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ uid_t uid = YCRED_FSUID();
++ gid_t gid =
++ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
++
++ if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
++ YAFFS_MAX_NAME_LENGTH)
++ return -ENAMETOOLONG;
++
++ if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
++ YAFFS_MAX_ALIAS_LENGTH)
++ return -ENAMETOOLONG;
++
++ dev = yaffs_inode_to_obj(dir)->my_dev;
++ yaffs_gross_lock(dev);
++ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
++ S_IFLNK | S_IRWXUGO, uid, gid, symname);
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ struct inode *inode;
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
++ return 0;
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
++ }
++
++ return -ENOMEM;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry)
++{
++ struct yaffs_dev *dev;
++ int ret_val = YAFFS_FAIL;
++ struct yaffs_obj *target;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
++ dev = yaffs_inode_to_obj(old_dir)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ /* Check if the target is an existing directory that is not empty. */
++ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
++ new_dentry->d_name.name);
++
++ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !list_empty(&target->variant.dir_variant.children)) {
++
++ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
++
++ ret_val = YAFFS_FAIL;
++ } else {
++ /* Now does unlinking internally using shadowing mechanism */
++ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
++
++ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
++ old_dentry->d_name.name,
++ yaffs_inode_to_obj(new_dir),
++ new_dentry->d_name.name);
++ }
++ yaffs_gross_unlock(dev);
++
++ if (ret_val == YAFFS_OK) {
++ if (target)
++ inode_dec_link_count(new_dentry->d_inode);
++
++ update_dir_time(old_dir);
++ if (old_dir != new_dir)
++ update_dir_time(new_dir);
++ return 0;
++ } else {
++ return -ENOTEMPTY;
++ }
++}
++
++
++
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++ int ret_val;
++
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
++ (int)(dir->i_ino), dentry->d_name.name);
++ obj = yaffs_inode_to_obj(dir);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ ret_val = yaffs_unlinker(obj, dentry->d_name.name);
++
++ if (ret_val == YAFFS_OK) {
++ inode_dec_link_count(dentry->d_inode);
++ dir->i_version++;
++ yaffs_gross_unlock(dev);
++ update_dir_time(dir);
++ return 0;
++ }
++ yaffs_gross_unlock(dev);
++ return -ENOTEMPTY;
++}
++
++
++
++static const struct inode_operations yaffs_dir_inode_operations = {
++ .create = yaffs_create,
++ .lookup = yaffs_lookup,
++ .link = yaffs_link,
++ .unlink = yaffs_unlink,
++ .symlink = yaffs_symlink,
++ .mkdir = yaffs_mkdir,
++ .rmdir = yaffs_unlink,
++ .mknod = yaffs_mknod,
++ .rename = yaffs_rename,
++ .setattr = yaffs_setattr,
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++};
++
++/*-----------------------------------------------------------------*/
++/* Directory search context allows us to unlock access to yaffs during
++ * filldir without causing problems with the directory being modified.
++ * This is similar to the tried and tested mechanism used in yaffs direct.
++ *
++ * A search context iterates along a doubly linked list of siblings in the
++ * directory. If the iterating object is deleted then this would corrupt
++ * the list iteration, likely causing a crash. The search context avoids
++ * this by using the remove_obj_fn to move the search context to the
++ * next object before the object is deleted.
++ *
++ * Many readdirs (and thus search contexts) may be alive simultaneously so
++ * each struct yaffs_dev has a list of these.
++ *
++ * A search context lives for the duration of a readdir.
++ *
++ * All these functions must be called while yaffs is locked.
++ */
++
++struct yaffs_search_context {
++ struct yaffs_dev *dev;
++ struct yaffs_obj *dir_obj;
++ struct yaffs_obj *next_return;
++ struct list_head others;
++};
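++/*
++ * Typical lifecycle (illustrative sketch of how yaffs_readdir() below
++ * drives a search context):
++ *
++ *	sc = yaffs_new_search(dir_obj);
++ *	while (sc->next_return) {
++ *		... emit sc->next_return to the caller ...
++ *		yaffs_search_advance(sc);
++ *	}
++ *	yaffs_search_end(sc);
++ *
++ * If another thread deletes the object a context currently points at,
++ * yaffs_remove_obj_callback() advances that context first.
++ */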
++
++/*
++ * yaffs_new_search() creates a new search context, initialises it and
++ * adds it to the device's search context list.
++ *
++ * Called at start of readdir.
++ */
++static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
++{
++ struct yaffs_dev *dev = dir->my_dev;
++ struct yaffs_search_context *sc =
++ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
++ if (sc) {
++ sc->dir_obj = dir;
++ sc->dev = dev;
++ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
++ sc->next_return = NULL;
++ else
++ sc->next_return =
++ list_entry(dir->variant.dir_variant.children.next,
++ struct yaffs_obj, siblings);
++ INIT_LIST_HEAD(&sc->others);
++ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
++ }
++ return sc;
++}
++
++/*
++ * yaffs_search_end() disposes of a search context and cleans up.
++ */
++static void yaffs_search_end(struct yaffs_search_context *sc)
++{
++ if (sc) {
++ list_del(&sc->others);
++ kfree(sc);
++ }
++}
++
++/*
++ * yaffs_search_advance() moves a search context to the next object.
++ * Called when the search iterates or when an object removal causes
++ * the search context to be moved to the next object.
++ */
++static void yaffs_search_advance(struct yaffs_search_context *sc)
++{
++ if (!sc)
++ return;
++
++ if (sc->next_return == NULL ||
++ list_empty(&sc->dir_obj->variant.dir_variant.children))
++ sc->next_return = NULL;
++ else {
++ struct list_head *next = sc->next_return->siblings.next;
++
++ if (next == &sc->dir_obj->variant.dir_variant.children)
++ sc->next_return = NULL; /* end of list */
++ else
++ sc->next_return =
++ list_entry(next, struct yaffs_obj, siblings);
++ }
++}
++
++/*
++ * yaffs_remove_obj_callback() is called when an object is unlinked.
++ * We check open search contexts and advance any which are currently
++ * on the object being iterated.
++ */
++static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
++{
++
++ struct list_head *i;
++ struct yaffs_search_context *sc;
++ struct list_head *search_contexts =
++ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
++
++ /* Iterate through the directory search contexts.
++ * If any are currently on the object being removed, then advance
++ * the search context to the next object to prevent a hanging pointer.
++ */
++ list_for_each(i, search_contexts) {
++ sc = list_entry(i, struct yaffs_search_context, others);
++ if (sc->next_return == obj)
++ yaffs_search_advance(sc);
++ }
++
++}
++
++
++/*-----------------------------------------------------------------*/
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
++static int yaffs_readdir(struct file *file, struct dir_context *ctx)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ struct yaffs_search_context *sc;
++ struct inode *inode = file->f_dentry->d_inode;
++ unsigned long offset, curoffs;
++ struct yaffs_obj *l;
++ int ret_val = 0;
++
++ char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ obj = yaffs_dentry_to_obj(file->f_dentry);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_dev_to_lc(dev)->readdir_process = current;
++
++ offset = ctx->pos;
++
++ sc = yaffs_new_search(obj);
++ if (!sc) {
++ ret_val = -ENOMEM;
++ goto out;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: starting at %d", (int)offset);
++
++ if (offset == 0) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry . ino %d",
++ (int)inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (!dir_emit_dot(file, ctx)) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ ctx->pos++;
++ }
++ if (offset == 1) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry .. ino %d",
++ (int)file->f_dentry->d_parent->d_inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (!dir_emit_dotdot(file, ctx)) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ ctx->pos++;
++ }
++
++ curoffs = 1;
++
++ /* If the directory has changed since the open or last call to
++ readdir, rewind to after the 2 canned entries. */
++ if (file->f_version != inode->i_version) {
++ offset = 2;
++ ctx->pos = offset;
++ file->f_version = inode->i_version;
++ }
++
++ while (sc->next_return) {
++ curoffs++;
++ l = sc->next_return;
++ if (curoffs >= offset) {
++ int this_inode = yaffs_get_obj_inode(l);
++ int this_type = yaffs_get_obj_type(l);
++
++ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: %s inode %d",
++ name, yaffs_get_obj_inode(l));
++
++ yaffs_gross_unlock(dev);
++
++			if (!dir_emit(ctx, name, strlen(name),
++				      this_inode, this_type)) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++
++ yaffs_gross_lock(dev);
++
++ offset++;
++ ctx->pos++;
++ }
++ yaffs_search_advance(sc);
++ }
++
++out:
++ yaffs_search_end(sc);
++ yaffs_dev_to_lc(dev)->readdir_process = NULL;
++ yaffs_gross_unlock(dev);
++
++ return ret_val;
++}
++#else
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ struct yaffs_search_context *sc;
++ struct inode *inode = f->f_dentry->d_inode;
++ unsigned long offset, curoffs;
++ struct yaffs_obj *l;
++ int ret_val = 0;
++
++ char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_dev_to_lc(dev)->readdir_process = current;
++
++ offset = f->f_pos;
++
++ sc = yaffs_new_search(obj);
++ if (!sc) {
++ ret_val = -ENOMEM;
++ goto out;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: starting at %d", (int)offset);
++
++ if (offset == 0) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry . ino %d",
++ (int)inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++ if (offset == 1) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry .. ino %d",
++ (int)f->f_dentry->d_parent->d_inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, "..", 2, offset,
++ f->f_dentry->d_parent->d_inode->i_ino,
++ DT_DIR) < 0) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++
++ curoffs = 1;
++
++ /* If the directory has changed since the open or last call to
++ readdir, rewind to after the 2 canned entries. */
++ if (f->f_version != inode->i_version) {
++ offset = 2;
++ f->f_pos = offset;
++ f->f_version = inode->i_version;
++ }
++
++ while (sc->next_return) {
++ curoffs++;
++ l = sc->next_return;
++ if (curoffs >= offset) {
++ int this_inode = yaffs_get_obj_inode(l);
++ int this_type = yaffs_get_obj_type(l);
++
++ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: %s inode %d",
++ name, yaffs_get_obj_inode(l));
++
++ yaffs_gross_unlock(dev);
++
++ if (filldir(dirent,
++ name,
++ strlen(name),
++ offset, this_inode, this_type) < 0) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++
++ yaffs_gross_lock(dev);
++
++ offset++;
++ f->f_pos++;
++ }
++ yaffs_search_advance(sc);
++ }
++
++out:
++ yaffs_search_end(sc);
++ yaffs_dev_to_lc(dev)->readdir_process = NULL;
++ yaffs_gross_unlock(dev);
++
++ return ret_val;
++}
++#endif
++
++static const struct file_operations yaffs_dir_operations = {
++ .read = generic_read_dir,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
++ .iterate = yaffs_readdir,
++#else
++ .readdir = yaffs_readdir,
++#endif
++ .fsync = yaffs_sync_object,
++ .llseek = generic_file_llseek,
++};
++
++static void yaffs_fill_inode_from_obj(struct inode *inode,
++ struct yaffs_obj *obj)
++{
++ if (inode && obj) {
++
++ /* Check mode against the variant type and attempt to repair if broken. */
++ u32 mode = obj->yst_mode;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (!S_ISREG(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFREG;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (!S_ISLNK(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFLNK;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!S_ISDIR(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFDIR;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ default:
++ /* TODO? */
++ break;
++ }
++
++ inode->i_flags |= S_NOATIME;
++
++ inode->i_ino = obj->obj_id;
++ inode->i_mode = obj->yst_mode;
++ i_uid_write(inode, obj->yst_uid);
++ i_gid_write(inode, obj->yst_gid);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++ inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++ inode->i_rdev = old_decode_dev(obj->yst_rdev);
++ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++ inode->i_atime.tv_nsec = 0;
++ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++ inode->i_mtime.tv_nsec = 0;
++ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++ inode->i_ctime.tv_nsec = 0;
++#else
++ inode->i_rdev = obj->yst_rdev;
++ inode->i_atime = obj->yst_atime;
++ inode->i_mtime = obj->yst_mtime;
++ inode->i_ctime = obj->yst_ctime;
++#endif
++ inode->i_size = yaffs_get_obj_length(obj);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++
++ set_nlink(inode, yaffs_get_obj_link_count(obj));
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
++ inode->i_mode, i_uid_read(inode), i_gid_read(inode),
++ inode->i_size, atomic_read(&inode->i_count));
++
++ switch (obj->yst_mode & S_IFMT) {
++ default: /* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ init_special_inode(inode, obj->yst_mode,
++ old_decode_dev(obj->yst_rdev));
++#else
++ init_special_inode(inode, obj->yst_mode,
++ (dev_t) (obj->yst_rdev));
++#endif
++ break;
++ case S_IFREG: /* file */
++ inode->i_op = &yaffs_file_inode_operations;
++ inode->i_fop = &yaffs_file_operations;
++ inode->i_mapping->a_ops =
++ &yaffs_file_address_operations;
++ break;
++ case S_IFDIR: /* directory */
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++ break;
++ case S_IFLNK: /* symlink */
++ inode->i_op = &yaffs_symlink_inode_operations;
++ break;
++ }
++
++ yaffs_inode_to_obj_lv(inode) = obj;
++
++ obj->my_inode = inode;
++
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_fill_inode invalid parameters");
++ }
++
++}
++
++
++
++/*
++ * yaffs background thread functions.
++ * yaffs_bg_thread_fn() is the thread function.
++ * yaffs_bg_start() launches the background thread.
++ * yaffs_bg_stop() cleans up the background thread.
++ *
++ * NB:
++ * The thread should only run after yaffs is initialised.
++ * The thread should be stopped before yaffs is unmounted.
++ * The thread should not do any writing while the fs is read only.
++ */
++
++static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
++{
++ unsigned erased_chunks =
++ dev->n_erased_blocks * dev->param.chunks_per_block;
++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++ unsigned scattered = 0; /* Free chunks not in an erased block */
++
++ if (erased_chunks < dev->n_free_chunks)
++ scattered = (dev->n_free_chunks - erased_chunks);
++
++ if (!context->bg_running)
++ return 0;
++ else if (scattered < (dev->param.chunks_per_block * 2))
++ return 0;
++ else if (erased_chunks > dev->n_free_chunks / 2)
++ return 0;
++ else if (erased_chunks > dev->n_free_chunks / 4)
++ return 1;
++ else
++ return 2;
++}
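++/*
++ * Worked example for the thresholds above (numbers invented for
++ * illustration): with chunks_per_block = 64 and n_free_chunks = 1000,
++ * gc is only considered once scattered >= 128. Then:
++ *	erased_chunks = 600 -> more than half erased    -> urgency 0
++ *	erased_chunks = 400 -> more than a quarter      -> urgency 1
++ *	erased_chunks = 200 -> mostly scattered space   -> urgency 2
++ */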
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++
++void yaffs_background_waker(unsigned long data)
++{
++ wake_up_process((struct task_struct *)data);
++}
++
++static int yaffs_bg_thread_fn(void *data)
++{
++ struct yaffs_dev *dev = (struct yaffs_dev *)data;
++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++ unsigned long now = jiffies;
++ unsigned long next_dir_update = now;
++ unsigned long next_gc = now;
++ unsigned long expires;
++ unsigned int urgency;
++
++ int gc_result;
++ struct timer_list timer;
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND,
++ "yaffs_background starting for dev %p", (void *)dev);
++
++#ifdef YAFFS_COMPILE_FREEZER
++ set_freezable();
++#endif
++ while (context->bg_running) {
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
++
++ if (kthread_should_stop())
++ break;
++
++#ifdef YAFFS_COMPILE_FREEZER
++ if (try_to_freeze())
++ continue;
++#endif
++ yaffs_gross_lock(dev);
++
++ now = jiffies;
++
++ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
++ yaffs_update_dirty_dirs(dev);
++ next_dir_update = now + HZ;
++ }
++
++ if (time_after(now, next_gc) && yaffs_bg_enable) {
++ if (!dev->is_checkpointed) {
++ urgency = yaffs_bg_gc_urgency(dev);
++ gc_result = yaffs_bg_gc(dev, urgency);
++ if (urgency > 1)
++ next_gc = now + HZ / 20 + 1;
++ else if (urgency > 0)
++ next_gc = now + HZ / 10 + 1;
++ else
++ next_gc = now + HZ * 2;
++ } else {
++ /*
++ * gc not running so set to next_dir_update
++ * to cut down on wake ups
++ */
++ next_gc = next_dir_update;
++ }
++ }
++ yaffs_gross_unlock(dev);
++#if 1
++ expires = next_dir_update;
++ if (time_before(next_gc, expires))
++ expires = next_gc;
++ if (time_before(expires, now))
++ expires = now + HZ;
++
++ Y_INIT_TIMER(&timer);
++ timer.expires = expires + 1;
++ timer.data = (unsigned long)current;
++ timer.function = yaffs_background_waker;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_timer(&timer);
++ schedule();
++ del_timer_sync(&timer);
++#else
++ msleep(10);
++#endif
++ }
++
++ return 0;
++}
++
++static int yaffs_bg_start(struct yaffs_dev *dev)
++{
++ int retval = 0;
++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++
++ if (dev->read_only)
++ return -1;
++
++ context->bg_running = 1;
++
++ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
++ (void *)dev, "yaffs-bg-%d",
++ context->mount_id);
++
++ if (IS_ERR(context->bg_thread)) {
++ retval = PTR_ERR(context->bg_thread);
++ context->bg_thread = NULL;
++ context->bg_running = 0;
++ }
++ return retval;
++}
++
++static void yaffs_bg_stop(struct yaffs_dev *dev)
++{
++ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
++
++ ctxt->bg_running = 0;
++
++ if (ctxt->bg_thread) {
++ kthread_stop(ctxt->bg_thread);
++ ctxt->bg_thread = NULL;
++ }
++}
++#else
++static int yaffs_bg_thread_fn(void *data)
++{
++ return 0;
++}
++
++static int yaffs_bg_start(struct yaffs_dev *dev)
++{
++ return 0;
++}
++
++static void yaffs_bg_stop(struct yaffs_dev *dev)
++{
++}
++#endif
++
++
++static void yaffs_flush_inodes(struct super_block *sb)
++{
++ struct inode *iptr;
++ struct yaffs_obj *obj;
++
++ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
++ obj = yaffs_inode_to_obj(iptr);
++ if (obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "flushing obj %d",
++ obj->obj_id);
++ yaffs_flush_file(obj, 1, 0);
++ }
++ }
++}
++
++static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++ if (!dev)
++ return;
++
++ yaffs_flush_inodes(sb);
++ yaffs_update_dirty_dirs(dev);
++ yaffs_flush_whole_cache(dev);
++ if (do_checkpoint)
++ yaffs_checkpoint_save(dev);
++}
++
++static LIST_HEAD(yaffs_context_list);
++struct mutex yaffs_context_lock;
++
++static void yaffs_put_super(struct super_block *sb)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
++ "yaffs_put_super");
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ "Shutting down yaffs background thread");
++ yaffs_bg_stop(dev);
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ "yaffs background thread shut down");
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_super(sb, 1);
++
++ yaffs_deinitialise(dev);
++
++ yaffs_gross_unlock(dev);
++
++ mutex_lock(&yaffs_context_lock);
++ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
++ mutex_unlock(&yaffs_context_lock);
++
++ if (yaffs_dev_to_lc(dev)->spare_buffer) {
++ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
++ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
++ }
++
++ kfree(dev);
++
++ yaffs_put_mtd_device(mtd);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
++ "yaffs_put_super done");
++}
++
++
++static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
++{
++ return yaffs_gc_control;
++}
++
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++
++static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
++ uint32_t generation)
++{
++ return Y_IGET(sb, ino);
++}
++
++static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
++ struct fid *fid, int fh_len,
++ int fh_type)
++{
++ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
++ yaffs2_nfs_get_inode);
++}
++
++static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
++ struct fid *fid, int fh_len,
++ int fh_type)
++{
++ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
++ yaffs2_nfs_get_inode);
++}
++
++struct dentry *yaffs2_get_parent(struct dentry *dentry)
++{
++
++ struct super_block *sb = dentry->d_inode->i_sb;
++ struct dentry *parent = ERR_PTR(-ENOENT);
++ struct inode *inode;
++ unsigned long parent_ino;
++ struct yaffs_obj *d_obj;
++ struct yaffs_obj *parent_obj;
++
++ d_obj = yaffs_inode_to_obj(dentry->d_inode);
++
++ if (d_obj) {
++ parent_obj = d_obj->parent;
++ if (parent_obj) {
++ parent_ino = yaffs_get_obj_inode(parent_obj);
++ inode = Y_IGET(sb, parent_ino);
++
++ if (IS_ERR(inode)) {
++ parent = ERR_CAST(inode);
++ } else {
++				/* d_obtain_alias() consumes the inode
++				 * reference and itself returns an ERR_PTR
++				 * on failure, so no further error handling
++				 * or iput() is needed here.
++				 */
++				parent = d_obtain_alias(inode);
++ }
++ }
++ }
++
++ return parent;
++}
++
++/* Any operation left NULL here falls back to the default
++ * exportfs implementation.
++ */
++
++static struct export_operations yaffs_export_ops = {
++ .fh_to_dentry = yaffs2_fh_to_dentry,
++ .fh_to_parent = yaffs2_fh_to_parent,
++ .get_parent = yaffs2_get_parent,
++};
++
++#endif
++
++static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
++{
++ /* Clear the association between the inode and
++ * the struct yaffs_obj.
++ */
++ obj->my_inode = NULL;
++ yaffs_inode_to_obj_lv(inode) = NULL;
++
++ /* If the object freeing was deferred, then the real
++ * free happens now.
++ * This should fix the inode inconsistency problem.
++ */
++ yaffs_handle_defered_free(obj);
++}
++
++#ifdef YAFFS_HAS_EVICT_INODE
++/* yaffs_evict_inode combines into one operation what was previously done in
++ * yaffs_clear_inode() and yaffs_delete_inode()
++ *
++ */
++static void yaffs_evict_inode(struct inode *inode)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ int deleteme = 0;
++
++ obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_evict_inode: ino %d, count %d %s",
++ (int)inode->i_ino, atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object");
++
++ if (!inode->i_nlink && !is_bad_inode(inode))
++ deleteme = 1;
++ truncate_inode_pages(&inode->i_data, 0);
++ Y_CLEAR_INODE(inode);
++
++ if (deleteme && obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode, obj);
++ yaffs_gross_unlock(dev);
++ }
++}
++#else
++
++/* clear is called to tell the fs to release any per-inode data it holds.
++ * Either the object still exists on disk and is just being dropped from
++ * the cache, or it has actually been deleted and we are being called via
++ * the chain
++ *	yaffs_delete_inode() -> clear_inode() -> yaffs_clear_inode()
++ */
++
++static void yaffs_clear_inode(struct inode *inode)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++
++ obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_clear_inode: ino %d, count %d %s",
++ (int)inode->i_ino, atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object");
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode, obj);
++ yaffs_gross_unlock(dev);
++ }
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_delete_inode: ino %d, count %d %s",
++ (int)inode->i_ino, atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object");
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ truncate_inode_pages(&inode->i_data, 0);
++#endif
++ clear_inode(inode);
++}
++#endif
++
++
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++ struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++#endif
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
++
++ yaffs_gross_lock(dev);
++
++ buf->f_type = YAFFS_MAGIC;
++ buf->f_bsize = sb->s_blocksize;
++ buf->f_namelen = 255;
++
++ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
++ /* Do this if chunk size is not a power of 2 */
++
++ uint64_t bytes_in_dev;
++ uint64_t bytes_free;
++
++ bytes_in_dev =
++ ((uint64_t)
++ ((dev->param.end_block - dev->param.start_block +
++ 1))) * ((uint64_t) (dev->param.chunks_per_block *
++ dev->data_bytes_per_chunk));
++
++ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
++ buf->f_blocks = bytes_in_dev;
++
++ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
++ ((uint64_t) (dev->data_bytes_per_chunk));
++
++ do_div(bytes_free, sb->s_blocksize);
++
++ buf->f_bfree = bytes_free;
++
++ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
++
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ } else {
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++ }
++
++ buf->f_files = 0;
++ buf->f_ffree = 0;
++ buf->f_bavail = buf->f_bfree;
++
++ yaffs_gross_unlock(dev);
++ return 0;
++}
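++/*
++ * Worked example for the middle branch above (invented geometry): with
++ * 1024 blocks of 64 chunks, 2048-byte chunks and a 4096-byte
++ * sb->s_blocksize, each fs block holds 4096/2048 = 2 chunks, so
++ * f_blocks = 1024 * 64 / 2 = 32768.
++ */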
++
++
++
++static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
++{
++
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
++ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
++ int do_checkpoint;
++ int dirty = yaffs_check_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
++ gc_urgent,
++ dirty ? "dirty" : "clean",
++ request_checkpoint ? "checkpoint requested" : "no checkpoint",
++ oneshot_checkpoint ? " one-shot" : "");
++
++ yaffs_gross_lock(dev);
++ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
++ oneshot_checkpoint) && !dev->is_checkpointed;
++
++ if (dirty || do_checkpoint) {
++ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
++ yaffs_clear_super_dirty(dev);
++ if (oneshot_checkpoint)
++ yaffs_auto_checkpoint &= ~4;
++ }
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
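++/*
++ * Checkpoint decision in brief: a requested checkpoint is honoured only
++ * when gc is not urgent; a one-shot checkpoint (bit 2 of
++ * yaffs_auto_checkpoint) is honoured regardless of gc; and both are
++ * skipped if the device is already checkpointed.
++ */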
++
++
++#ifdef YAFFS_HAS_WRITE_SUPER
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ "yaffs_write_super %s",
++ request_checkpoint ? " checkpt" : "");
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ return 0;
++#endif
++}
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++ return 0;
++}
++
++
++
++static const struct super_operations yaffs_super_ops = {
++ .statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
++ .read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
++ .put_inode = yaffs_put_inode,
++#endif
++ .put_super = yaffs_put_super,
++#ifdef YAFFS_HAS_EVICT_INODE
++ .evict_inode = yaffs_evict_inode,
++#else
++ .delete_inode = yaffs_delete_inode,
++ .clear_inode = yaffs_clear_inode,
++#endif
++ .sync_fs = yaffs_sync_fs,
++#ifdef YAFFS_HAS_WRITE_SUPER
++ .write_super = yaffs_write_super,
++#endif
++};
++
++struct yaffs_options {
++ int inband_tags;
++ int skip_checkpoint_read;
++ int skip_checkpoint_write;
++ int no_cache;
++ int tags_ecc_on;
++ int tags_ecc_overridden;
++ int lazy_loading_enabled;
++ int lazy_loading_overridden;
++ int empty_lost_and_found;
++ int empty_lost_and_found_overridden;
++ int disable_summary;
++};
++
++#define MAX_OPT_LEN 30
++static int yaffs_parse_options(struct yaffs_options *options,
++ const char *options_str)
++{
++ char cur_opt[MAX_OPT_LEN + 1];
++ int p;
++ int error = 0;
++
++	/* Parse through the options, which are a comma-separated list */
++
++ while (options_str && *options_str && !error) {
++ memset(cur_opt, 0, MAX_OPT_LEN + 1);
++ p = 0;
++
++ while (*options_str == ',')
++ options_str++;
++
++ while (*options_str && *options_str != ',') {
++ if (p < MAX_OPT_LEN) {
++ cur_opt[p] = *options_str;
++ p++;
++ }
++ options_str++;
++ }
++
++ if (!strcmp(cur_opt, "inband-tags")) {
++ options->inband_tags = 1;
++ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
++ options->tags_ecc_on = 0;
++ options->tags_ecc_overridden = 1;
++ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
++ options->tags_ecc_on = 1;
++ options->tags_ecc_overridden = 1;
++ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
++ options->lazy_loading_enabled = 0;
++ options->lazy_loading_overridden = 1;
++ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
++ options->lazy_loading_enabled = 1;
++ options->lazy_loading_overridden = 1;
++ } else if (!strcmp(cur_opt, "disable-summary")) {
++ options->disable_summary = 1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
++ options->empty_lost_and_found = 0;
++ options->empty_lost_and_found_overridden = 1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
++ options->empty_lost_and_found = 1;
++ options->empty_lost_and_found_overridden = 1;
++ } else if (!strcmp(cur_opt, "no-cache")) {
++ options->no_cache = 1;
++ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
++ options->skip_checkpoint_read = 1;
++ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
++ options->skip_checkpoint_write = 1;
++ } else if (!strcmp(cur_opt, "no-checkpoint")) {
++ options->skip_checkpoint_read = 1;
++ options->skip_checkpoint_write = 1;
++ } else {
++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++ cur_opt);
++ error = 1;
++ }
++ }
++
++ return error;
++}
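++/*
++ * Example usage (hypothetical device and mount point):
++ *
++ *	mount -t yaffs2 -o inband-tags,no-checkpoint \
++ *		/dev/mtdblock3 /mnt/flash
++ *
++ * An unrecognised option makes yaffs_parse_options() return non-zero
++ * and the mount is rejected.
++ */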
++
++
++static struct dentry *yaffs_make_root(struct inode *inode)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
++ struct dentry *root = d_alloc_root(inode);
++
++ if (!root)
++ iput(inode);
++
++ return root;
++#else
++ return d_make_root(inode);
++#endif
++}
++
++
++
++
++static struct super_block *yaffs_internal_read_super(int yaffs_version,
++ struct super_block *sb,
++ void *data, int silent)
++{
++ int n_blocks;
++ struct inode *inode = NULL;
++ struct dentry *root;
++	struct yaffs_dev *dev = NULL;
++ char devname_buf[BDEVNAME_SIZE + 1];
++ struct mtd_info *mtd;
++ int err;
++ char *data_str = (char *)data;
++ struct yaffs_linux_context *context = NULL;
++ struct yaffs_param *param;
++
++ int read_only = 0;
++ int inband_tags = 0;
++
++ struct yaffs_options options;
++
++ unsigned mount_id;
++ int found;
++ struct yaffs_linux_context *context_iterator;
++ struct list_head *l;
++
++ if (!sb) {
++ printk(KERN_INFO "yaffs: sb is NULL\n");
++ return NULL;
++ }
++
++ sb->s_magic = YAFFS_MAGIC;
++ sb->s_op = &yaffs_super_ops;
++ sb->s_flags |= MS_NOATIME;
++
++ read_only = ((sb->s_flags & MS_RDONLY) != 0);
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++ sb->s_export_op = &yaffs_export_ops;
++#endif
++
++ if (!sb->s_dev)
++ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++ else if (!yaffs_devname(sb, devname_buf))
++ printk(KERN_INFO "yaffs: devname is NULL\n");
++ else
++ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
++ sb->s_dev,
++ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
++
++ if (!data_str)
++ data_str = "";
++
++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
++
++ memset(&options, 0, sizeof(options));
++
++ if (yaffs_parse_options(&options, data_str)) {
++ /* Option parsing failed */
++ return NULL;
++ }
++
++ sb->s_blocksize = PAGE_CACHE_SIZE;
++ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_super: Using yaffs%d", yaffs_version);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Attempting MTD mount of %u.%u,\"%s\"",
++ MAJOR(sb->s_dev), MINOR(sb->s_dev),
++ yaffs_devname(sb, devname_buf));
++
++ /* Get the device */
++ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++ if (IS_ERR(mtd)) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: MTD device %u either not valid or unavailable",
++ MINOR(sb->s_dev));
++ return NULL;
++ }
++
++ if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
++ yaffs_version = 2;
++ }
++
++ /* Added NCB 26/5/2006 for completeness */
++ if (yaffs_version == 2 && !options.inband_tags
++ && WRITE_SIZE(mtd) == 512) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
++ yaffs_version = 1;
++ }
++
++ if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) ||
++ options.inband_tags)
++ inband_tags = 1;
++
++	if (yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0)
++ return NULL;
++
++ /* OK, so if we got here, we have an MTD that's NAND and looks
++	 * like it has the right capabilities.
++	 * Set the struct yaffs_dev up for this MTD.
++ */
++
++ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
++ read_only = 1;
++ printk(KERN_INFO
++ "yaffs: mtd is read only, setting superblock read only\n"
++ );
++ sb->s_flags |= MS_RDONLY;
++ }
++
++ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
++ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
++
++ if (!dev || !context) {
++ kfree(dev);
++ kfree(context);
++ dev = NULL;
++ context = NULL;
++
++		/* Fatal: could not allocate the device structures */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs_read_super: Failed trying to allocate struct yaffs_dev."
++ );
++ return NULL;
++ }
++ memset(dev, 0, sizeof(struct yaffs_dev));
++ param = &(dev->param);
++
++ memset(context, 0, sizeof(struct yaffs_linux_context));
++ dev->os_context = context;
++ INIT_LIST_HEAD(&(context->context_list));
++ context->dev = dev;
++ context->super = sb;
++
++ dev->read_only = read_only;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ sb->s_fs_info = dev;
++#else
++ sb->u.generic_sbp = dev;
++#endif
++
++
++ dev->driver_context = mtd;
++ param->name = mtd->name;
++
++ /* Set up the memory size parameters.... */
++
++
++ param->n_reserved_blocks = 5;
++ param->n_caches = (options.no_cache) ? 0 : 10;
++ param->inband_tags = inband_tags;
++
++ param->enable_xattr = 1;
++ if (options.lazy_loading_overridden)
++ param->disable_lazy_load = !options.lazy_loading_enabled;
++
++ param->defered_dir_update = 1;
++
++ if (options.tags_ecc_overridden)
++ param->no_tags_ecc = !options.tags_ecc_on;
++
++ param->empty_lost_n_found = 1;
++ param->refresh_period = 500;
++ param->disable_summary = options.disable_summary;
++
++
++#ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING
++ param->disable_bad_block_marking = 1;
++#endif
++ if (options.empty_lost_and_found_overridden)
++ param->empty_lost_n_found = options.empty_lost_and_found;
++
++ /* ... and the functions. */
++ if (yaffs_version == 2) {
++ param->is_yaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ param->total_bytes_per_chunk = mtd->writesize;
++ param->chunks_per_block = mtd->erasesize / mtd->writesize;
++#else
++ param->total_bytes_per_chunk = mtd->oobblock;
++ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
++#endif
++ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
++
++ param->start_block = 0;
++ param->end_block = n_blocks - 1;
++ } else {
++ param->is_yaffs2 = 0;
++ n_blocks = YCALCBLOCKS(mtd->size,
++ YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
++
++ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
++ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
++ }
++
++ param->start_block = 0;
++ param->end_block = n_blocks - 1;
++
++ yaffs_mtd_drv_install(dev);
++
++ param->sb_dirty_fn = yaffs_set_super_dirty;
++ param->gc_control_fn = yaffs_gc_control_callback;
++
++ yaffs_dev_to_lc(dev)->super = sb;
++
++ param->use_nand_ecc = 1;
++
++ param->skip_checkpt_rd = options.skip_checkpoint_read;
++ param->skip_checkpt_wr = options.skip_checkpoint_write;
++
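++	/*
++	 * Pick the lowest mount_id not already used by another mounted
++	 * yaffs context. Rescanning the context list for each candidate
++	 * id is O(n^2), but the number of simultaneous mounts is small.
++	 */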
++ mutex_lock(&yaffs_context_lock);
++ /* Get a mount id */
++ found = 0;
++ for (mount_id = 0; !found; mount_id++) {
++ found = 1;
++ list_for_each(l, &yaffs_context_list) {
++ context_iterator =
++ list_entry(l, struct yaffs_linux_context,
++ context_list);
++ if (context_iterator->mount_id == mount_id)
++ found = 0;
++ }
++ }
++ context->mount_id = mount_id;
++
++ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
++ &yaffs_context_list);
++ mutex_unlock(&yaffs_context_lock);
++
++ /* Directory search handling... */
++ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
++ param->remove_obj_fn = yaffs_remove_obj_callback;
++
++ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
++
++ yaffs_gross_lock(dev);
++
++ err = yaffs_guts_initialise(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_super: guts initialised %s",
++ (err == YAFFS_OK) ? "OK" : "FAILED");
++
++ if (err == YAFFS_OK)
++ yaffs_bg_start(dev);
++
++ if (!context->bg_thread)
++ param->defered_dir_update = 0;
++
++ sb->s_maxbytes = yaffs_max_file_size(dev);
++
++ /* Release lock before yaffs_get_inode() */
++ yaffs_gross_unlock(dev);
++
++ /* Create root inode */
++ if (err == YAFFS_OK)
++ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
++
++ if (!inode)
++ return NULL;
++
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
++
++ root = yaffs_make_root(inode);
++
++ if (!root)
++ return NULL;
++
++ sb->s_root = root;
++	if (!dev->is_checkpointed)
++ yaffs_set_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs_read_super: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
++ return sb;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
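++/*
++ * The VFS mount entry point changed over time: 2.6.39 and later use
++ * .mount returning a dentry via mount_bdev(); 2.6.18 through 2.6.38
++ * use .get_sb filling in a vfsmount via get_sb_bdev(); older kernels
++ * return the super_block from get_sb_bdev() directly.
++ */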
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *data)
++{
++ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd);
++}
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data, struct vfsmount *mnt)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs",
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++ .mount = yaffs_mount,
++#else
++ .get_sb = yaffs_read_super,
++#endif
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *data)
++{
++ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd);
++}
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name, void *data,
++ struct vfsmount *mnt)
++{
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs2",
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++ .mount = yaffs2_mount,
++#else
++ .get_sb = yaffs2_read_super,
++#endif
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++ void *data, int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++static struct proc_dir_entry *my_proc_entry;
++
++static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
++{
++ struct yaffs_param *param = &dev->param;
++ int bs[10];
++
++	yaffs_count_blocks_by_state(dev, bs);
++
++ buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
++ buf += sprintf(buf, "end_block............ %d\n", param->end_block);
++ buf += sprintf(buf, "total_bytes_per_chunk %d\n",
++ param->total_bytes_per_chunk);
++ buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
++ buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
++ buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
++ buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
++ buf += sprintf(buf, "empty_lost_n_found... %d\n",
++ param->empty_lost_n_found);
++ buf += sprintf(buf, "disable_lazy_load.... %d\n",
++ param->disable_lazy_load);
++ buf += sprintf(buf, "disable_bad_block_mrk %d\n",
++ param->disable_bad_block_marking);
++ buf += sprintf(buf, "refresh_period....... %d\n",
++ param->refresh_period);
++ buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
++ buf += sprintf(buf, "n_reserved_blocks.... %d\n",
++ param->n_reserved_blocks);
++ buf += sprintf(buf, "always_check_erased.. %d\n",
++ param->always_check_erased);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "block count by state\n");
++ buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n",
++ bs[0], bs[1], bs[2], bs[3], bs[4]);
++ buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n",
++ bs[5], bs[6], bs[7], bs[8], bs[9]);
++
++ return buf;
++}
++
++static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
++{
++ buf += sprintf(buf, "max file size....... %lld\n",
++ (long long) yaffs_max_file_size(dev));
++ buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
++ dev->data_bytes_per_chunk);
++ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
++ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
++ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
++ buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
++ dev->blocks_in_checkpt);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
++ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
++ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
++ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
++ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
++ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
++ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
++ buf += sprintf(buf, "passive_gc_count..... %u\n",
++ dev->passive_gc_count);
++ buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
++ dev->oldest_dirty_gc_count);
++ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
++ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
++ buf += sprintf(buf, "n_retried_writes..... %u\n",
++ dev->n_retried_writes);
++ buf += sprintf(buf, "n_retired_blocks..... %u\n",
++ dev->n_retired_blocks);
++ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
++ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
++ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
++ dev->n_tags_ecc_fixed);
++ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
++ dev->n_tags_ecc_unfixed);
++ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
++ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
++ buf += sprintf(buf, "n_unlinked_files..... %u\n",
++ dev->n_unlinked_files);
++ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
++ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
++ buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
++ buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
++
++ return buf;
++}
++
++static int yaffs_proc_read(char *page,
++ char **start,
++ off_t offset, int count, int *eof, void *data)
++{
++ struct list_head *item;
++ char *buf = page;
++ int step = offset;
++ int n = 0;
++
++	/* Get proc_file_read() to step 'offset' by one on each successive call.
++ * We use 'offset' (*ppos) to indicate where we are in dev_list.
++ * This also assumes the user has posted a read buffer large
++ * enough to hold the complete output; but that's life in /proc.
++ */
++
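++	/*
++	 * Step 0 prints the build banner and step 1 a blank line; after
++	 * that each device consumes two steps: an even step prints its
++	 * configuration (part0), the following odd step its runtime
++	 * statistics (part1).
++	 */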
++ *(int *)start = 1;
++
++ /* Print header first */
++ if (step == 0)
++ buf +=
++ sprintf(buf,
++ "Multi-version YAFFS built:" __DATE__ " " __TIME__
++ "\n");
++ else if (step == 1)
++ buf += sprintf(buf, "\n");
++ else {
++ step -= 2;
++
++ mutex_lock(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ list_for_each(item, &yaffs_context_list) {
++ struct yaffs_linux_context *dc =
++ list_entry(item, struct yaffs_linux_context,
++ context_list);
++ struct yaffs_dev *dev = dc->dev;
++
++ if (n < (step & ~1)) {
++ n += 2;
++ continue;
++ }
++ if ((step & 1) == 0) {
++ buf +=
++ sprintf(buf, "\nDevice %d \"%s\"\n", n,
++ dev->param.name);
++ buf = yaffs_dump_dev_part0(buf, dev);
++ } else {
++ buf = yaffs_dump_dev_part1(buf, dev);
++ }
++
++ break;
++ }
++ mutex_unlock(&yaffs_context_lock);
++ }
++
++ return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
++
++static struct {
++ char *mask_name;
++ unsigned mask_bitfield;
++} mask_flags[] = {
++ {"allocate", YAFFS_TRACE_ALLOCATE},
++ {"always", YAFFS_TRACE_ALWAYS},
++ {"background", YAFFS_TRACE_BACKGROUND},
++ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++ {"buffers", YAFFS_TRACE_BUFFERS},
++ {"bug", YAFFS_TRACE_BUG},
++ {"checkpt", YAFFS_TRACE_CHECKPOINT},
++ {"deletion", YAFFS_TRACE_DELETION},
++ {"erase", YAFFS_TRACE_ERASE},
++ {"error", YAFFS_TRACE_ERROR},
++ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
++ {"gc", YAFFS_TRACE_GC},
++ {"lock", YAFFS_TRACE_LOCK},
++ {"mtd", YAFFS_TRACE_MTD},
++ {"nandaccess", YAFFS_TRACE_NANDACCESS},
++ {"os", YAFFS_TRACE_OS},
++ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++ {"scan", YAFFS_TRACE_SCAN},
++ {"mount", YAFFS_TRACE_MOUNT},
++ {"tracing", YAFFS_TRACE_TRACING},
++ {"sync", YAFFS_TRACE_SYNC},
++ {"write", YAFFS_TRACE_WRITE},
++ {"verify", YAFFS_TRACE_VERIFY},
++ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
++ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
++ {"all", 0xffffffff},
++ {"none", 0},
++ {NULL, 0},
++};
++
++#define MAX_MASK_NAME_LENGTH 40
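++/*
++ * Each whitespace-separated token written to the proc entry is a mask
++ * name from the table above or a number, optionally prefixed:
++ *   +name or +0xNN  add those bits to the trace mask
++ *   -name or -0xNN  clear those bits
++ *   =name or =0xNN  set the mask to exactly that value
++ * An unprefixed token behaves like '+'. YAFFS_TRACE_ALWAYS is forced
++ * back on after parsing.
++ */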
++static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ unsigned rg = 0, mask_bitfield;
++ char *end;
++ char *mask_name;
++ const char *x;
++ char substring[MAX_MASK_NAME_LENGTH + 1];
++ int i;
++ int done = 0;
++ int add, len = 0;
++ int pos = 0;
++
++ rg = yaffs_trace_mask;
++
++ while (!done && (pos < count)) {
++ done = 1;
++ while ((pos < count) && isspace(buf[pos]))
++ pos++;
++
++ switch (buf[pos]) {
++ case '+':
++ case '-':
++ case '=':
++ add = buf[pos];
++ pos++;
++ break;
++
++ default:
++ add = ' ';
++ break;
++ }
++ mask_name = NULL;
++
++ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
++ if (end > buf + pos) {
++ mask_name = "numeral";
++ len = end - (buf + pos);
++ pos += len;
++ done = 0;
++ } else {
++ for (x = buf + pos, i = 0;
++ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++ substring[i] = *x;
++ substring[i] = '\0';
++
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ if (strcmp(substring, mask_flags[i].mask_name)
++ == 0) {
++ mask_name = mask_flags[i].mask_name;
++ mask_bitfield =
++ mask_flags[i].mask_bitfield;
++ done = 0;
++ break;
++ }
++ }
++ }
++
++ if (mask_name != NULL) {
++ done = 0;
++ switch (add) {
++ case '-':
++ rg &= ~mask_bitfield;
++ break;
++ case '+':
++ rg |= mask_bitfield;
++ break;
++ case '=':
++ rg = mask_bitfield;
++ break;
++ default:
++ rg |= mask_bitfield;
++ break;
++ }
++ }
++ }
++
++ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
++
++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
++
++ if (rg & YAFFS_TRACE_ALWAYS) {
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ char flag;
++ flag = ((rg & mask_flags[i].mask_bitfield) ==
++ mask_flags[i].mask_bitfield) ? '+' : '-';
++ printk(KERN_DEBUG "%c%s\n", flag,
++ mask_flags[i].mask_name);
++ }
++ }
++
++ return count;
++}
++
++/* Debug strings are of the form:
++ * .bnnn print info on block n
++ * .cobjn,chunkn print nand chunk id for objn:chunkn
++ */
++
++static int yaffs_proc_debug_write(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++
++ char str[100];
++ char *p0;
++ char *p1;
++ long p1_val;
++ long p0_val;
++ char cmd;
++ struct list_head *item;
++
++ memset(str, 0, sizeof(str));
++	memcpy(str, buf, min(count, sizeof(str) - 1));
++
++ cmd = str[1];
++
++ p0 = str + 2;
++
++ p1 = p0;
++
++ while (*p1 && *p1 != ',') {
++ p1++;
++ }
++ *p1 = '\0';
++ p1++;
++
++ p0_val = simple_strtol(p0, NULL, 0);
++ p1_val = simple_strtol(p1, NULL, 0);
++
++
++ mutex_lock(&yaffs_context_lock);
++
++	/* Apply the command to every mounted yaffs device. */
++ list_for_each(item, &yaffs_context_list) {
++ struct yaffs_linux_context *dc =
++ list_entry(item, struct yaffs_linux_context,
++ context_list);
++ struct yaffs_dev *dev = dc->dev;
++
++ if (cmd == 'b') {
++ struct yaffs_block_info *bi;
++
++			bi = yaffs_get_block_info(dev, p0_val);
++
++			if (bi) {
++ printk("Block %d: state %d, retire %d, use %d, seq %d\n",
++ (int)p0_val, bi->block_state,
++ bi->needs_retiring, bi->pages_in_use,
++ bi->seq_number);
++ }
++ } else if (cmd == 'c') {
++ struct yaffs_obj *obj;
++ int nand_chunk;
++
++ obj = yaffs_find_by_number(dev, p0_val);
++ if (!obj)
++ printk("No obj %d\n", (int)p0_val);
++ else {
++				if (p1_val == 0)
++ nand_chunk = obj->hdr_chunk;
++ else
++ nand_chunk =
++ yaffs_find_chunk_in_file(obj,
++ p1_val, NULL);
++ printk("Nand chunk for %d:%d is %d\n",
++ (int)p0_val, (int)p1_val, nand_chunk);
++ }
++ }
++ }
++
++ mutex_unlock(&yaffs_context_lock);
++
++ return count;
++}
++
++static int yaffs_proc_write(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ if (buf[0] == '.')
++ return yaffs_proc_debug_write(file, buf, count, data);
++ return yaffs_proc_write_trace_options(file, buf, count, data);
++}
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++ struct file_system_type *fst;
++ int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++ {&yaffs_fs_type, 0},
++ {&yaffs2_fs_type, 0},
++ {NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++ int error = 0;
++ struct file_system_to_install *fsinst;
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs built " __DATE__ " " __TIME__ " Installing.");
++
++ mutex_init(&yaffs_context_lock);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++ /* Install the proc_fs entries */
++ my_proc_entry = create_proc_entry("yaffs",
++ S_IRUGO | S_IFREG, YPROC_ROOT);
++
++ if (my_proc_entry) {
++ my_proc_entry->write_proc = yaffs_proc_write;
++ my_proc_entry->read_proc = yaffs_proc_read;
++ my_proc_entry->data = NULL;
++ } else {
++ return -ENOMEM;
++ }
++#endif
++
++ /* Now add the file system entries */
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst && !error) {
++ error = register_filesystem(fsinst->fst);
++ if (!error)
++ fsinst->installed = 1;
++ fsinst++;
++ }
++
++ /* Any errors? uninstall */
++ if (error) {
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++ }
++
++ return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++ struct file_system_to_install *fsinst;
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs built " __DATE__ " " __TIME__ " removing.");
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++ remove_proc_entry("yaffs", YPROC_ROOT);
++#endif
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++}
++
++module_init(init_yaffs_fs)
++module_exit(exit_yaffs_fs)
++
++MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs1.c linux-3.15-rc5/fs/yaffs2/yaffs_yaffs1.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs1.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_yaffs1.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,422 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_yaffs1.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++#include "yaffs_attribs.h"
++
++int yaffs1_scan(struct yaffs_dev *dev)
++{
++ struct yaffs_ext_tags tags;
++ int blk;
++ int result;
++ int chunk;
++ int c;
++ int deleted;
++ enum yaffs_block_state state;
++ LIST_HEAD(hard_list);
++ struct yaffs_block_info *bi;
++ u32 seq_number;
++ struct yaffs_obj_hdr *oh;
++ struct yaffs_obj *in;
++ struct yaffs_obj *parent;
++ int alloc_failed = 0;
++ struct yaffs_shadow_fixer *shadow_fixers = NULL;
++ u8 *chunk_data;
++
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "yaffs1_scan starts intstartblk %d intendblk %d...",
++ dev->internal_start_block, dev->internal_end_block);
++
++ chunk_data = yaffs_get_temp_buffer(dev);
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
++ blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
++ "Block scanning block %d state %d seq %d",
++ blk, state, seq_number);
++
++ if (state == YAFFS_BLOCK_STATE_DEAD) {
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++ "block %d is bad", blk);
++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ }
++ bi++;
++ }
++
++ /* For each block.... */
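++	/*
++	 * Each chunk is classified as deleted/unreadable, unused (block
++	 * empty, or this is the block being allocated from), a data
++	 * chunk (chunk_id > 0) hooked into its file, or an object
++	 * header (chunk_id == 0) from which the object is rebuilt.
++	 */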
++ for (blk = dev->internal_start_block;
++ !alloc_failed && blk <= dev->internal_end_block; blk++) {
++
++ cond_resched();
++
++ bi = yaffs_get_block_info(dev, blk);
++ state = bi->block_state;
++
++ deleted = 0;
++
++ /* For each chunk in each block that needs scanning.... */
++ for (c = 0;
++ !alloc_failed && c < dev->param.chunks_per_block &&
++ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
++ /* Read the tags and decide what to do */
++ chunk = blk * dev->param.chunks_per_block + c;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++ &tags);
++
++ /* Let's have a good look at this chunk... */
++
++ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
++ tags.is_deleted) {
++ /* YAFFS1 only...
++ * A deleted chunk
++ */
++ deleted++;
++ dev->n_free_chunks++;
++ } else if (!tags.chunk_used) {
++ /* An unassigned chunk in the block
++ * This means that either the block is empty or
++ * this is the one being allocated from
++ */
++
++ if (c == 0) {
++					/* We're looking at the first chunk in
++					 * the block, so the block is unused */
++ state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ /* this is the block being allocated */
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Allocating from %d %d",
++ blk, c);
++ state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = c;
++ dev->alloc_block_finder = blk;
++
++ }
++
++ dev->n_free_chunks +=
++ (dev->param.chunks_per_block - c);
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ unsigned int endpos;
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ /* PutChunkIntoFile checks for a clash
++ * (two data chunks with the same chunk_id).
++ */
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in) {
++ if (!yaffs_put_chunk_in_file
++ (in, tags.chunk_id, chunk, 1))
++ alloc_failed = 1;
++ }
++
++ endpos =
++ (tags.chunk_id - 1) *
++ dev->data_bytes_per_chunk +
++ tags.n_bytes;
++ if (in &&
++ in->variant_type ==
++ YAFFS_OBJECT_TYPE_FILE &&
++ in->variant.file_variant.scanned_size <
++ endpos) {
++ in->variant.file_variant.scanned_size =
++ endpos;
++ if (!dev->param.use_header_file_size) {
++ in->variant.
++ file_variant.file_size =
++ in->variant.
++ file_variant.scanned_size;
++ }
++
++ }
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Make the object
++ */
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk,
++ chunk_data,
++ NULL);
++
++ oh = (struct yaffs_obj_hdr *)chunk_data;
++
++ in = yaffs_find_by_number(dev, tags.obj_id);
++ if (in && in->variant_type != oh->type) {
++ /* This should not happen, but somehow
++					 * we've ended up with an obj_id that
++ * has been reused but not yet deleted,
++ * and worse still it has changed type.
++ * Delete the old object.
++ */
++
++ yaffs_del_obj(in);
++ in = NULL;
++ }
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ oh->type);
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in && oh->shadows_obj > 0) {
++
++ struct yaffs_shadow_fixer *fixer;
++ fixer =
++ kmalloc(sizeof
++ (struct yaffs_shadow_fixer),
++ GFP_NOFS);
++ if (fixer) {
++ fixer->next = shadow_fixers;
++ shadow_fixers = fixer;
++ fixer->obj_id = tags.obj_id;
++ fixer->shadowed_id =
++ oh->shadows_obj;
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Shadow fixer: %d shadows %d",
++ fixer->obj_id,
++ fixer->shadowed_id);
++
++ }
++
++ }
++
++ if (in && in->valid) {
++ /* We have already filled this one.
++ * We have a duplicate and need to
++ * resolve it. */
++
++ unsigned existing_serial = in->serial;
++ unsigned new_serial =
++ tags.serial_number;
++
++ if (((existing_serial + 1) & 3) ==
++ new_serial) {
++ /* Use new one - destroy the
++					 * existing one */
++ yaffs_chunk_del(dev,
++ in->hdr_chunk,
++ 1, __LINE__);
++ in->valid = 0;
++ } else {
++ /* Use existing - destroy
++ * this one. */
++ yaffs_chunk_del(dev, chunk, 1,
++ __LINE__);
++ }
++ }
++
++ if (in && !in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id ==
++ YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle
++ * with directory structure */
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ } else if (in && !in->valid) {
++ /* we need to load this info */
++
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ in->dirty = 0;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ parent =
++ yaffs_find_or_create_by_number
++ (dev, oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ if (!parent)
++ alloc_failed = 1;
++ if (parent && parent->variant_type ==
++ YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ INIT_LIST_HEAD(&parent->
++ variant.dir_variant.
++ children);
++ } else if (!parent ||
++ parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, a problem....
++ * We're trying to use a
++ * non-directory as a directory
++ */
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ );
++ parent = dev->lost_n_found;
++ }
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (dev->param.
++ use_header_file_size)
++ in->variant.
++ file_variant.file_size
++ = yaffs_oh_to_size(oh);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ in->variant.
++ hardlink_variant.equiv_id =
++ oh->equiv_id;
++ list_add(&in->hard_links,
++ &hard_list);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.
++ alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.
++ symlink_variant.alias)
++ alloc_failed = 1;
++ break;
++ }
++ }
++ }
++ }
++
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ /* If we got this far while scanning,
++ * then the block is fully allocated. */
++ state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ /* If the block was partially allocated then
++ * treat it as fully allocated. */
++ state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++
++ bi->block_state = state;
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL)
++ yaffs_block_became_dirty(dev, blk);
++ }
++
++ /* Ok, we've done all the scanning.
++ * Fix up the hard link chains.
++	 * We should now have scanned all the objects, so now it's time to add
++ * these hardlinks.
++ */
++
++ yaffs_link_fixup(dev, &hard_list);
++
++ /*
++ * Fix up any shadowed objects.
++ * There should not be more than one of these.
++ */
++ {
++ struct yaffs_shadow_fixer *fixer;
++ struct yaffs_obj *obj;
++
++ while (shadow_fixers) {
++ fixer = shadow_fixers;
++ shadow_fixers = fixer->next;
++ /* Complete the rename transaction by deleting the
++ * shadowed object then setting the object header
++ to unshadowed.
++ */
++ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
++ if (obj)
++ yaffs_del_obj(obj);
++
++ obj = yaffs_find_by_number(dev, fixer->obj_id);
++
++ if (obj)
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++ kfree(fixer);
++ }
++ }
++
++ yaffs_release_temp_buffer(dev, chunk_data);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
++
++ return YAFFS_OK;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs1.h linux-3.15-rc5/fs/yaffs2/yaffs_yaffs1.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs1.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_yaffs1.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS1_H__
++#define __YAFFS_YAFFS1_H__
++
++#include "yaffs_guts.h"
++int yaffs1_scan(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs2.c linux-3.15-rc5/fs/yaffs2/yaffs_yaffs2.c
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs2.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_yaffs2.c 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,1534 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_checkptrw.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_verify.h"
++#include "yaffs_attribs.h"
++#include "yaffs_summary.h"
++
++/*
++ * Checkpoints are really no benefit on very small partitions.
++ *
++ * To save space on small partitions don't bother with checkpoints unless
++ * the partition is at least this big.
++ */
++#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
++#define YAFFS_SMALL_HOLE_THRESHOLD 4
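++
++/*
++ * Holes smaller than YAFFS_SMALL_HOLE_THRESHOLD chunks are filled
++ * with real zeroed chunks in yaffs2_handle_hole(); larger holes just
++ * get a hole-start object header instead.
++ */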
++
++/*
++ * Oldest Dirty Sequence Number handling.
++ */
++
++/* yaffs_calc_oldest_dirty_seq()
++ * yaffs2_find_oldest_dirty_seq()
++ * Calculate the oldest dirty sequence number if we don't know it.
++ */
++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
++{
++ int i;
++ unsigned seq;
++ unsigned block_no = 0;
++ struct yaffs_block_info *b;
++
++ if (!dev->param.is_yaffs2)
++ return;
++
++ /* Find the oldest dirty sequence number. */
++ seq = dev->seq_number + 1;
++ b = dev->block_info;
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
++ (b->pages_in_use - b->soft_del_pages) <
++ dev->param.chunks_per_block &&
++ b->seq_number < seq) {
++ seq = b->seq_number;
++ block_no = i;
++ }
++ b++;
++ }
++
++ if (block_no) {
++ dev->oldest_dirty_seq = seq;
++ dev->oldest_dirty_block = block_no;
++ }
++}
++
++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
++{
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (!dev->oldest_dirty_seq)
++ yaffs_calc_oldest_dirty_seq(dev);
++}
++
++/*
++ * yaffs_clear_oldest_dirty_seq()
++ * Called when a block is erased or marked bad. (ie. when its seq_number
++ * becomes invalid). If the value matches the oldest then we clear
++ * dev->oldest_dirty_seq to force its recomputation.
++ */
++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi)
++{
++
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
++ }
++}
++
++/*
++ * yaffs2_update_oldest_dirty_seq()
++ * Update the oldest dirty sequence number whenever we dirty a block.
++ * Only do this if the oldest_dirty_seq is actually being tracked.
++ */
++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
++ struct yaffs_block_info *bi)
++{
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (dev->oldest_dirty_seq) {
++ if (dev->oldest_dirty_seq > bi->seq_number) {
++ dev->oldest_dirty_seq = bi->seq_number;
++ dev->oldest_dirty_block = block_no;
++ }
++ }
++}
++
++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
++{
++
++ if (!dev->param.is_yaffs2)
++ return 1; /* disqualification only applies to yaffs2. */
++
++ if (!bi->has_shrink_hdr)
++ return 1; /* can gc */
++
++ yaffs2_find_oldest_dirty_seq(dev);
++
++ /* Can't do gc of this block if there are any blocks older than this
++ * one that have discarded pages.
++ */
++ return (bi->seq_number <= dev->oldest_dirty_seq);
++}
++
++/*
++ * yaffs2_find_refresh_block()
++ * periodically finds the oldest full block by sequence number for refreshing.
++ * Only for yaffs2.
++ */
++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
++{
++ u32 b;
++ u32 oldest = 0;
++ u32 oldest_seq = 0;
++ struct yaffs_block_info *bi;
++
++ if (!dev->param.is_yaffs2)
++ return oldest;
++
++ /*
++ * If refresh period < 10 then refreshing is disabled.
++ */
++ if (dev->param.refresh_period < 10)
++ return oldest;
++
++ /*
++ * Fix broken values.
++ */
++ if (dev->refresh_skip > dev->param.refresh_period)
++ dev->refresh_skip = dev->param.refresh_period;
++
++ if (dev->refresh_skip > 0)
++ return oldest;
++
++ /*
++ * Refresh skip is now zero.
++ * We'll do a refresh this time around....
++ * Update the refresh skip and find the oldest block.
++ */
++ dev->refresh_skip = dev->param.refresh_period;
++ dev->refresh_count++;
++ bi = dev->block_info;
++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++
++ if (oldest < 1 || bi->seq_number < oldest_seq) {
++ oldest = b;
++ oldest_seq = bi->seq_number;
++ }
++ }
++ bi++;
++ }
++
++ if (oldest > 0) {
++ yaffs_trace(YAFFS_TRACE_GC,
++ "GC refresh count %d selected block %d with seq_number %d",
++ dev->refresh_count, oldest, oldest_seq);
++ }
++
++ return oldest;
++}
++
++int yaffs2_checkpt_required(struct yaffs_dev *dev)
++{
++ int nblocks;
++
++ if (!dev->param.is_yaffs2)
++ return 0;
++
++ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
++
++ return !dev->param.skip_checkpt_wr &&
++ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
++}
++
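++/*
++ * Estimate how many blocks a checkpoint would occupy: sum the size of
++ * every record the checkpoint stream writes (validity markers, device
++ * record, block info, chunk bits, per-object records plus tnode
++ * trees, and the checksum), convert bytes to blocks and add slack for
++ * rounding and bad blocks. The result is cached in
++ * dev->checkpoint_blocks_required until it is recalculated.
++ */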
++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
++{
++ int retval;
++ int n_bytes = 0;
++ int n_blocks;
++ int dev_blocks;
++
++ if (!dev->param.is_yaffs2)
++ return 0;
++
++ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
++ /* Not a valid value so recalculate */
++ dev_blocks = dev->param.end_block - dev->param.start_block + 1;
++ n_bytes += sizeof(struct yaffs_checkpt_validity);
++ n_bytes += sizeof(struct yaffs_checkpt_dev);
++ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
++ n_bytes += dev_blocks * dev->chunk_bit_stride;
++ n_bytes +=
++ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
++ dev->n_obj;
++ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
++ n_bytes += sizeof(struct yaffs_checkpt_validity);
++ n_bytes += sizeof(u32); /* checksum */
++
++ /* Round up and add 2 blocks to allow for some bad blocks,
++ * so add 3 */
++
++ n_blocks =
++ (n_bytes /
++ (dev->data_bytes_per_chunk *
++ dev->param.chunks_per_block)) + 3;
++
++ dev->checkpoint_blocks_required = n_blocks;
++ }
++
++ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
++ if (retval < 0)
++ retval = 0;
++ return retval;
++}
++
++/*--------------------- Checkpointing --------------------*/
++
++static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
++{
++ struct yaffs_checkpt_validity cp;
++
++ memset(&cp, 0, sizeof(cp));
++
++ cp.struct_type = sizeof(cp);
++ cp.magic = YAFFS_MAGIC;
++ cp.version = YAFFS_CHECKPOINT_VERSION;
++ cp.head = (head) ? 1 : 0;
++
++ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
++{
++ struct yaffs_checkpt_validity cp;
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ if (ok)
++ ok = (cp.struct_type == sizeof(cp)) &&
++ (cp.magic == YAFFS_MAGIC) &&
++ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++ (cp.head == ((head) ? 1 : 0));
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
++ struct yaffs_dev *dev)
++{
++ cp->n_erased_blocks = dev->n_erased_blocks;
++ cp->alloc_block = dev->alloc_block;
++ cp->alloc_page = dev->alloc_page;
++ cp->n_free_chunks = dev->n_free_chunks;
++
++ cp->n_deleted_files = dev->n_deleted_files;
++ cp->n_unlinked_files = dev->n_unlinked_files;
++ cp->n_bg_deletions = dev->n_bg_deletions;
++ cp->seq_number = dev->seq_number;
++
++}
++
++static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
++ struct yaffs_checkpt_dev *cp)
++{
++ dev->n_erased_blocks = cp->n_erased_blocks;
++ dev->alloc_block = cp->alloc_block;
++ dev->alloc_page = cp->alloc_page;
++ dev->n_free_chunks = cp->n_free_chunks;
++
++ dev->n_deleted_files = cp->n_deleted_files;
++ dev->n_unlinked_files = cp->n_unlinked_files;
++ dev->n_bg_deletions = cp->n_bg_deletions;
++ dev->seq_number = cp->seq_number;
++}
++
++static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_dev cp;
++ u32 n_bytes;
++ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++ int ok;
++
++ /* Write device runtime values */
++ yaffs2_dev_to_checkpt_dev(&cp, dev);
++ cp.struct_type = sizeof(cp);
++
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
++ return 0;
++
++ /* Write block info */
++ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
++ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
++ if (!ok)
++ return 0;
++
++ /* Write chunk bits */
++ n_bytes = n_blocks * dev->chunk_bit_stride;
++ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_dev cp;
++ u32 n_bytes;
++ u32 n_blocks =
++ (dev->internal_end_block - dev->internal_start_block + 1);
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
++ return 0;
++
++ if (cp.struct_type != sizeof(cp))
++ return 0;
++
++ yaffs_checkpt_dev_to_dev(dev, &cp);
++
++ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
++
++ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
++
++ if (!ok)
++ return 0;
++
++ n_bytes = n_blocks * dev->chunk_bit_stride;
++
++ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
++ struct yaffs_obj *obj)
++{
++ cp->obj_id = obj->obj_id;
++ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
++ cp->hdr_chunk = obj->hdr_chunk;
++ cp->variant_type = obj->variant_type;
++ cp->deleted = obj->deleted;
++ cp->soft_del = obj->soft_del;
++ cp->unlinked = obj->unlinked;
++ cp->fake = obj->fake;
++ cp->rename_allowed = obj->rename_allowed;
++ cp->unlink_allowed = obj->unlink_allowed;
++ cp->serial = obj->serial;
++ cp->n_data_chunks = obj->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
++}
++
++static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
++ struct yaffs_checkpt_obj *cp)
++{
++ struct yaffs_obj *parent;
++
++ if (obj->variant_type != cp->variant_type) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
++ cp->obj_id, cp->variant_type, cp->hdr_chunk,
++ obj->variant_type);
++ return 0;
++ }
++
++ obj->obj_id = cp->obj_id;
++
++ if (cp->parent_id)
++ parent = yaffs_find_or_create_by_number(obj->my_dev,
++ cp->parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ else
++ parent = NULL;
++
++ if (parent) {
++ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
++ cp->obj_id, cp->parent_id,
++ cp->variant_type, cp->hdr_chunk,
++ parent->variant_type);
++ return 0;
++ }
++ yaffs_add_obj_to_dir(parent, obj);
++ }
++
++ obj->hdr_chunk = cp->hdr_chunk;
++ obj->variant_type = cp->variant_type;
++ obj->deleted = cp->deleted;
++ obj->soft_del = cp->soft_del;
++ obj->unlinked = cp->unlinked;
++ obj->fake = cp->fake;
++ obj->rename_allowed = cp->rename_allowed;
++ obj->unlink_allowed = cp->unlink_allowed;
++ obj->serial = cp->serial;
++ obj->n_data_chunks = cp->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
++
++ if (obj->hdr_chunk > 0)
++ obj->lazy_loaded = 1;
++ return 1;
++}
++
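++/*
++ * Recursively walk a file's tnode tree for checkpointing. Internal
++ * levels just recurse; each level-0 tnode is written out as a
++ * (base chunk offset, tnode) pair, the format that
++ * yaffs2_rd_checkpt_tnodes() consumes when rebuilding the tree.
++ */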
++static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
++ struct yaffs_tnode *tn, u32 level,
++ int chunk_offset)
++{
++ int i;
++ struct yaffs_dev *dev = in->my_dev;
++ int ok = 1;
++ u32 base_offset;
++
++ if (!tn)
++ return 1;
++
++ if (level > 0) {
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++ if (!tn->internal[i])
++ continue;
++ ok = yaffs2_checkpt_tnode_worker(in,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset <<
++ YAFFS_TNODES_INTERNAL_BITS) + i);
++ }
++ return ok;
++ }
++
++ /* Level 0 tnode */
++ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
++ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
++ sizeof(base_offset));
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
++ dev->tnode_size);
++
++ return ok;
++}
++
++static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
++{
++ u32 end_marker = ~0;
++ int ok = 1;
++
++ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return ok;
++
++ ok = yaffs2_checkpt_tnode_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.file_variant.
++ top_level, 0);
++ if (ok)
++ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
++ sizeof(end_marker)) == sizeof(end_marker));
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
++{
++ u32 base_chunk;
++ int ok = 1;
++ struct yaffs_dev *dev = obj->my_dev;
++	struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
++ struct yaffs_tnode *tn;
++ int nread = 0;
++
++ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
++ sizeof(base_chunk));
++
++ while (ok && (~base_chunk)) {
++ nread++;
++ /* Read level 0 tnode */
++
++ tn = yaffs_get_tnode(dev);
++ if (tn)
++ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
++ dev->tnode_size);
++ else
++ ok = 0;
++
++ if (tn && ok)
++ ok = yaffs_add_find_tnode_0(dev,
++						    file_struct_ptr,
++ base_chunk, tn) ? 1 : 0;
++
++ if (ok)
++ ok = (yaffs2_checkpt_rd
++ (dev, &base_chunk,
++ sizeof(base_chunk)) == sizeof(base_chunk));
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "Checkpoint read tnodes %d records, last %d. ok %d",
++ nread, base_chunk, ok);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_checkpt_obj cp;
++ int i;
++ int ok = 1;
++ struct list_head *lh;
++
++ /* Iterate through the objects in each hash entry,
++ * dumping them to the checkpointing stream.
++ */
++
++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++ list_for_each(lh, &dev->obj_bucket[i].list) {
++ obj = list_entry(lh, struct yaffs_obj, hash_link);
++ if (!obj->defered_free) {
++ yaffs2_obj_checkpt_obj(&cp, obj);
++ cp.struct_type = sizeof(cp);
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
++ cp.obj_id, cp.parent_id,
++ cp.variant_type, cp.hdr_chunk, obj);
++
++ ok = (yaffs2_checkpt_wr(dev, &cp,
++ sizeof(cp)) == sizeof(cp));
++
++ if (ok &&
++ obj->variant_type ==
++ YAFFS_OBJECT_TYPE_FILE)
++ ok = yaffs2_wr_checkpt_tnodes(obj);
++ }
++ }
++ }
++
++ /* Dump end of list */
++ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
++ cp.struct_type = sizeof(cp);
++
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_checkpt_obj cp;
++ int ok = 1;
++ int done = 0;
++ LIST_HEAD(hard_list);
++
++
++ while (ok && !done) {
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (cp.struct_type != sizeof(cp)) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "struct size %d instead of %d ok %d",
++ cp.struct_type, (int)sizeof(cp), ok);
++ ok = 0;
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "Checkpoint read object %d parent %d type %d chunk %d ",
++ cp.obj_id, cp.parent_id, cp.variant_type,
++ cp.hdr_chunk);
++
++ if (ok && cp.obj_id == ~0) {
++ done = 1;
++ } else if (ok) {
++ obj =
++ yaffs_find_or_create_by_number(dev, cp.obj_id,
++ cp.variant_type);
++ if (obj) {
++ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
++ if (!ok)
++ break;
++ if (obj->variant_type ==
++ YAFFS_OBJECT_TYPE_FILE) {
++ ok = yaffs2_rd_checkpt_tnodes(obj);
++ } else if (obj->variant_type ==
++ YAFFS_OBJECT_TYPE_HARDLINK) {
++ list_add(&obj->hard_links, &hard_list);
++ }
++ } else {
++ ok = 0;
++ }
++ }
++ }
++
++ if (ok)
++ yaffs_link_fixup(dev, &hard_list);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
++{
++ u32 checkpt_sum;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
++
++ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
++ sizeof(checkpt_sum));
++
++ if (!ok)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
++{
++ u32 checkpt_sum0;
++ u32 checkpt_sum1;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
++
++ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
++ sizeof(checkpt_sum1));
++
++ if (!ok)
++ return 0;
++
++ if (checkpt_sum0 != checkpt_sum1)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
++{
++ int ok = 1;
++
++ if (!yaffs2_checkpt_required(dev)) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "skipping checkpoint write");
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 1);
++
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint validity");
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint device");
++ ok = yaffs2_wr_checkpt_dev(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint objects");
++ ok = yaffs2_wr_checkpt_objs(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint validity");
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok)
++ ok = yaffs2_wr_checkpt_sum(dev);
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return dev->is_checkpointed;
++}
++
++static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
++{
++ int ok = 1;
++
++ if (!dev->param.is_yaffs2)
++ ok = 0;
++
++ if (ok && dev->param.skip_checkpt_rd) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "skipping checkpoint read");
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
++
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint validity");
++ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint device");
++ ok = yaffs2_rd_checkpt_dev(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint objects");
++ ok = yaffs2_rd_checkpt_objs(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint validity");
++ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok) {
++ ok = yaffs2_rd_checkpt_sum(dev);
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint checksum %d", ok);
++ }
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return ok ? 1 : 0;
++}
++
++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
++{
++ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
++ dev->is_checkpointed = 0;
++ yaffs2_checkpt_invalidate_stream(dev);
++ }
++ if (dev->param.sb_dirty_fn)
++ dev->param.sb_dirty_fn(dev);
++}
++
++int yaffs_checkpoint_save(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "save entry: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++
++ if (!dev->is_checkpointed) {
++ yaffs2_checkpt_invalidate(dev);
++ yaffs2_wr_checkpt_data(dev);
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
++ "save exit: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ return dev->is_checkpointed;
++}
++
++int yaffs2_checkpt_restore(struct yaffs_dev *dev)
++{
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "restore entry: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ retval = yaffs2_rd_checkpt_data(dev);
++
++ if (dev->is_checkpointed) {
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "restore exit: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ return retval;
++}
++
++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
++{
++ /* if new_size > old_file_size.
++ * We're going to be writing a hole.
++ * If the hole is small then write zeros otherwise write a start
++ * of hole marker.
++ */
++ loff_t old_file_size;
++ loff_t increase;
++ int small_hole;
++ int result = YAFFS_OK;
++ struct yaffs_dev *dev = NULL;
++ u8 *local_buffer = NULL;
++ int small_increase_ok = 0;
++
++ if (!obj)
++ return YAFFS_FAIL;
++
++ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ dev = obj->my_dev;
++
++ /* Bail out if not yaffs2 mode */
++ if (!dev->param.is_yaffs2)
++ return YAFFS_OK;
++
++ old_file_size = obj->variant.file_variant.file_size;
++
++ if (new_size <= old_file_size)
++ return YAFFS_OK;
++
++ increase = new_size - old_file_size;
++
++ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
++ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
++ small_hole = 1;
++ else
++ small_hole = 0;
++
++ if (small_hole)
++ local_buffer = yaffs_get_temp_buffer(dev);
++
++ if (local_buffer) {
++ /* fill hole with zero bytes */
++ loff_t pos = old_file_size;
++ int this_write;
++ int written;
++ memset(local_buffer, 0, dev->data_bytes_per_chunk);
++ small_increase_ok = 1;
++
++ while (increase > 0 && small_increase_ok) {
++ this_write = increase;
++ if (this_write > dev->data_bytes_per_chunk)
++ this_write = dev->data_bytes_per_chunk;
++ written =
++ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
++ 0);
++ if (written == this_write) {
++ pos += this_write;
++ increase -= this_write;
++ } else {
++ small_increase_ok = 0;
++ }
++ }
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++
++ /* If out of space then reverse any chunks we've added */
++ if (!small_increase_ok)
++ yaffs_resize_file_down(obj, old_file_size);
++ }
++
++ if (!small_increase_ok &&
++ obj->parent &&
++ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
++ /* Write a hole start header with the old file size */
++ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
++ }
++
++ return result;
++}
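
The small-hole test compares the size increase against YAFFS_SMALL_HOLE_THRESHOLD chunks' worth of data before deciding between zero-filling and writing a hole-start header. The same decision in isolation, with assumed values for the threshold and chunk size (the real ones come from the yaffs headers and device parameters):

#include <stdio.h>

/* Assumed values for illustration only; the real definitions live in
 * the yaffs headers and the device parameters. */
#define SMALL_HOLE_THRESHOLD	4	/* chunks */
#define DATA_BYTES_PER_CHUNK	2048

static int hole_is_small(long long increase)
{
	return increase < (long long)SMALL_HOLE_THRESHOLD * DATA_BYTES_PER_CHUNK;
}

int main(void)
{
	printf("4 KiB hole small? %d\n", hole_is_small(4096));    /* 1: zero-fill */
	printf("1 MiB hole small? %d\n", hole_is_small(1 << 20)); /* 0: hole marker */
	return 0;
}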
++
++struct yaffs_block_index {
++ int seq;
++ int block;
++};
++
++static int yaffs2_ybicmp(const void *a, const void *b)
++{
++ int aseq = ((struct yaffs_block_index *)a)->seq;
++ int bseq = ((struct yaffs_block_index *)b)->seq;
++ int ablock = ((struct yaffs_block_index *)a)->block;
++ int bblock = ((struct yaffs_block_index *)b)->block;
++
++ if (aseq == bseq)
++ return ablock - bblock;
++
++ return aseq - bseq;
++}
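
yaffs2_ybicmp() orders blocks primarily by sequence number and breaks ties by block number, exactly the shape a qsort-style comparator needs. A userspace demonstration with the same two-key rule and made-up sample data:

#include <stdio.h>
#include <stdlib.h>

struct block_index {
	int seq;
	int block;
};

/* Same two-key rule as yaffs2_ybicmp: seq first, block as tie-break. */
static int ybicmp(const void *a, const void *b)
{
	const struct block_index *x = a, *y = b;

	if (x->seq == y->seq)
		return x->block - y->block;
	return x->seq - y->seq;
}

int main(void)
{
	struct block_index bi[] = {
		{ 7, 12 }, { 3, 40 }, { 7, 2 }, { 3, 9 },
	};
	size_t i, n = sizeof(bi) / sizeof(bi[0]);

	qsort(bi, n, sizeof(bi[0]), ybicmp);
	for (i = 0; i < n; i++)
		printf("seq %d block %d\n", bi[i].seq, bi[i].block);
	/* Prints (3,9) (3,40) (7,2) (7,12): scan order, oldest first. */
	return 0;
}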
++
++static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi,
++ int blk, int chunk_in_block,
++ int *found_chunks,
++ u8 *chunk_data,
++ struct list_head *hard_list,
++ int summary_available)
++{
++ struct yaffs_obj_hdr *oh;
++ struct yaffs_obj *in;
++ struct yaffs_obj *parent;
++ int equiv_id;
++ loff_t file_size;
++ int is_shrink;
++ int is_unlinked;
++ struct yaffs_ext_tags tags;
++ int result;
++ int alloc_failed = 0;
++ int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
++ struct yaffs_file_var *file_var;
++ struct yaffs_hardlink_var *hl_var;
++ struct yaffs_symlink_var *sl_var;
++
++ if (summary_available) {
++ result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
++ tags.seq_number = bi->seq_number;
++ }
++
++ if (!summary_available || tags.obj_id == 0) {
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
++ dev->tags_used++;
++ } else {
++ dev->summary_used++;
++ }
++
++ /* Let's have a good look at this chunk... */
++
++ if (!tags.chunk_used) {
++ /* An unassigned chunk in the block.
++ * If there are used chunks after this one, then
++ * it is a chunk that was skipped due to failing
++ * the erased check. Just skip it so that it can
++ * be deleted.
++ * But, more typically, we get here when this is
++ * an unallocated chunk and this means that
++ * either the block is empty or this is the one
++ * being allocated from.
++ */
++
++ if (*found_chunks) {
++ /* This is a chunk that was skipped due
++ * to failing the erased check */
++ } else if (chunk_in_block == 0) {
++ /* We're looking at the first chunk in
++ * the block so the block is unused */
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ if (dev->seq_number == bi->seq_number) {
++ /* Allocating from this block*/
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Allocating from %d %d",
++ blk, chunk_in_block);
++
++ bi->block_state =
++ YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = chunk_in_block;
++ dev->alloc_block_finder = blk;
++ } else {
++ /* This is a partially written block
++ * that is not the current
++ * allocation block.
++ */
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Partially written block %d detected. gc will fix this.",
++ blk);
++ }
++ }
++ }
++
++ dev->n_free_chunks++;
++
++ } else if (tags.ecc_result ==
++ YAFFS_ECC_RESULT_UNFIXED) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Unfixed ECC in chunk(%d:%d), chunk ignored",
++ blk, chunk_in_block);
++ dev->n_free_chunks++;
++ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
++ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
++ tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
++ (tags.chunk_id > 0 &&
++ tags.n_bytes > dev->data_bytes_per_chunk) ||
++ tags.seq_number != bi->seq_number) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
++ blk, chunk_in_block, tags.obj_id,
++ tags.chunk_id, tags.n_bytes);
++ dev->n_free_chunks++;
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ loff_t endpos;
++ loff_t chunk_base = (tags.chunk_id - 1) *
++ dev->data_bytes_per_chunk;
++
++ *found_chunks = 1;
++
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ if (!in)
++ /* Out of memory */
++ alloc_failed = 1;
++
++ if (in &&
++ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
++ chunk_base < in->variant.file_variant.shrink_size) {
++ /* This has not been invalidated by
++ * a resize */
++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
++ chunk, -1))
++ alloc_failed = 1;
++
++ /* File size is calculated by looking at
++ * the data chunks if we have not
++ * seen an object header yet.
++ * Stop this practice once we find an
++ * object header.
++ */
++ endpos = chunk_base + tags.n_bytes;
++
++ if (!in->valid &&
++ in->variant.file_variant.scanned_size < endpos) {
++ in->variant.file_variant.
++ scanned_size = endpos;
++ in->variant.file_variant.
++ file_size = endpos;
++ }
++ } else if (in) {
++ /* This chunk has been invalidated by a
++ * resize or a past file deletion,
++ * so delete the chunk. */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++ }
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Thus, we read in the object header and make
++ * the object
++ */
++ *found_chunks = 1;
++
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++
++ oh = NULL;
++ in = NULL;
++
++ if (tags.extra_available) {
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ tags.extra_obj_type);
++ if (!in)
++ alloc_failed = 1;
++ }
++
++ if (!in ||
++ (!in->valid && dev->param.disable_lazy_load) ||
++ tags.extra_shadows ||
++ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
++
++ /* If we don't have valid info then we
++ * need to read the chunk.
++ * TODO: In future we can probably defer
++ * reading the chunk and live with
++ * invalid data until needed.
++ */
++
++ result = yaffs_rd_chunk_tags_nand(dev,
++ chunk,
++ chunk_data,
++ NULL);
++
++ oh = (struct yaffs_obj_hdr *)chunk_data;
++
++ if (dev->param.inband_tags) {
++ /* Fix up the header if they got
++ * corrupted by inband tags */
++ oh->shadows_obj =
++ oh->inband_shadowed_obj_id;
++ oh->is_shrink =
++ oh->inband_is_shrink;
++ }
++
++ if (!in) {
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id, oh->type);
++ if (!in)
++ alloc_failed = 1;
++ }
++ }
++
++ if (!in) {
++ /* TODO Hoosterman we have a problem! */
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
++ tags.obj_id, chunk);
++ return YAFFS_FAIL;
++ }
++
++ if (in->valid) {
++ /* We have already filled this one.
++ * We have a duplicate that will be
++ * discarded, but we first have to suck
++ * out resize info if it is a file.
++ */
++ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
++ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
++ (tags.extra_available &&
++ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++ )) {
++ loff_t this_size = (oh) ?
++ yaffs_oh_to_size(oh) :
++ tags.extra_file_size;
++ u32 parent_obj_id = (oh) ?
++ oh->parent_obj_id :
++ tags.extra_parent_id;
++
++ is_shrink = (oh) ?
++ oh->is_shrink :
++ tags.extra_is_shrink;
++
++ /* If it is deleted (unlinked
++ * at start also means deleted)
++ * we treat the file size as
++ * being zeroed at this point.
++ */
++ if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
++ parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
++ this_size = 0;
++ is_shrink = 1;
++ }
++
++ if (is_shrink &&
++ in->variant.file_variant.shrink_size >
++ this_size)
++ in->variant.file_variant.shrink_size =
++ this_size;
++
++ if (is_shrink)
++ bi->has_shrink_hdr = 1;
++ }
++ /* Use existing - destroy this one. */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++ }
++
++ if (!in->valid && in->variant_type !=
++ (oh ? oh->type : tags.extra_obj_type)) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: Bad type, %d != %d, for object %d at chunk %d during scan",
++ oh ? oh->type : tags.extra_obj_type,
++ in->variant_type, tags.obj_id,
++ chunk);
++ in = yaffs_retype_obj(in, oh ? oh->type : tags.extra_obj_type);
++ }
++
++ if (!in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle
++ * with directory structure */
++ in->valid = 1;
++
++ if (oh) {
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ in->lazy_loaded = 0;
++ } else {
++ in->lazy_loaded = 1;
++ }
++ in->hdr_chunk = chunk;
++
++ } else if (!in->valid) {
++ /* we need to load this info */
++ in->valid = 1;
++ in->hdr_chunk = chunk;
++ if (oh) {
++ in->variant_type = oh->type;
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++
++ if (oh->shadows_obj > 0)
++ yaffs_handle_shadowed_obj(dev,
++ oh->shadows_obj, 1);
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ parent = yaffs_find_or_create_by_number(dev,
++ oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ file_size = yaffs_oh_to_size(oh);
++ is_shrink = oh->is_shrink;
++ equiv_id = oh->equiv_id;
++ } else {
++ in->variant_type = tags.extra_obj_type;
++ parent = yaffs_find_or_create_by_number(dev,
++ tags.extra_parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ file_size = tags.extra_file_size;
++ is_shrink = tags.extra_is_shrink;
++ equiv_id = tags.extra_equiv_id;
++ in->lazy_loaded = 1;
++ }
++ in->dirty = 0;
++
++ if (!parent)
++ alloc_failed = 1;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ if (parent &&
++ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ INIT_LIST_HEAD(&parent->
++ variant.dir_variant.children);
++ } else if (!parent ||
++ parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, another problem....
++ * Trying to use a non-directory as a directory
++ */
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ );
++ parent = dev->lost_n_found;
++ }
++ yaffs_add_obj_to_dir(parent, in);
++
++ is_unlinked = (parent == dev->del_dir) ||
++ (parent == dev->unlinked_dir);
++
++ if (is_shrink)
++ /* Mark the block */
++ bi->has_shrink_hdr = 1;
++
++ /* Note re hardlinks.
++ * Since we might scan a hardlink before its equivalent
++ * object is scanned we put them all in a list.
++ * After scanning is complete, we should have all the
++ * objects, so we run through this list and fix up all
++ * the chains.
++ */
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* TODO: got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ file_var = &in->variant.file_variant;
++ if (file_var->scanned_size < file_size) {
++ /* This covers the case where the file
++ * size is greater than the data held.
++ * This will happen if the file is
++ * resized to be larger than its
++ * current data extents.
++ */
++ file_var->file_size = file_size;
++ file_var->scanned_size = file_size;
++ }
++
++ if (file_var->shrink_size > file_size)
++ file_var->shrink_size = file_size;
++
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ hl_var = &in->variant.hardlink_variant;
++ if (!is_unlinked) {
++ hl_var->equiv_id = equiv_id;
++ list_add(&in->hard_links, hard_list);
++ }
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ sl_var = &in->variant.symlink_variant;
++ if (oh) {
++ sl_var->alias =
++ yaffs_clone_str(oh->alias);
++ if (!sl_var->alias)
++ alloc_failed = 1;
++ }
++ break;
++ }
++ }
++ }
++ return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
++}
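
yaffs2_scan_chunk() is at heart a three-way classifier: unused chunk, data chunk (chunk_id > 0), or object header (chunk_id == 0). A minimal restatement of that dispatch, using a toy tags struct that merely mirrors the relevant fields of struct yaffs_ext_tags:

#include <stdio.h>

/* Toy stand-in for struct yaffs_ext_tags; the field names mirror the
 * real ones but this struct is illustration only. */
struct toy_tags {
	int chunk_used;
	unsigned obj_id;
	unsigned chunk_id;
};

static const char *classify(const struct toy_tags *t)
{
	if (!t->chunk_used)
		return "unused (free, or block being allocated from)";
	if (t->chunk_id > 0)
		return "data chunk";
	return "object header";
}

int main(void)
{
	struct toy_tags free_c = { 0, 0, 0 };
	struct toy_tags data_c = { 1, 42, 3 };
	struct toy_tags hdr_c  = { 1, 42, 0 };

	printf("%s\n%s\n%s\n",
	       classify(&free_c), classify(&data_c), classify(&hdr_c));
	return 0;
}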
++
++int yaffs2_scan_backwards(struct yaffs_dev *dev)
++{
++ int blk;
++ int block_iter;
++ int start_iter;
++ int end_iter;
++ int n_to_scan = 0;
++ enum yaffs_block_state state;
++ int c;
++ int deleted;
++ LIST_HEAD(hard_list);
++ struct yaffs_block_info *bi;
++ u32 seq_number;
++ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++ u8 *chunk_data;
++ int found_chunks;
++ int alloc_failed = 0;
++ struct yaffs_block_index *block_index = NULL;
++ int alt_block_index = 0;
++ int summary_available;
++
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
++ dev->internal_start_block, dev->internal_end_block);
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ block_index =
++ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
++
++ if (!block_index) {
++ block_index =
++ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
++ alt_block_index = 1;
++ }
++
++ if (!block_index) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "yaffs2_scan_backwards() could not allocate block index!"
++ );
++ return YAFFS_FAIL;
++ }
++
++ dev->blocks_in_checkpt = 0;
++
++ chunk_data = yaffs_get_temp_buffer(dev);
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
++ blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
++ "Block scanning block %d state %d seq %d",
++ blk, bi->block_state, seq_number);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ dev->blocks_in_checkpt++;
++
++ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++ "block %d is bad", blk);
++ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ } else if (bi->block_state ==
++ YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ /* Determine the highest sequence number */
++ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++ block_index[n_to_scan].seq = seq_number;
++ block_index[n_to_scan].block = blk;
++ n_to_scan++;
++ if (seq_number >= dev->seq_number)
++ dev->seq_number = seq_number;
++ } else {
++ /* TODO: Nasty sequence number! */
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Block scanning block %d has bad sequence number %d",
++ blk, seq_number);
++ }
++ }
++ bi++;
++ }
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "%d blocks to be sorted...", n_to_scan);
++
++ cond_resched();
++
++ /* Sort the blocks by sequence number */
++ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
++ yaffs2_ybicmp, NULL);
++
++ cond_resched();
++
++ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
++
++ /* Now scan the blocks looking at the data. */
++ start_iter = 0;
++ end_iter = n_to_scan - 1;
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
++
++ /* For each block.... backwards */
++ for (block_iter = end_iter;
++ !alloc_failed && block_iter >= start_iter;
++ block_iter--) {
++ /* Cooperative multitasking! This loop can run for so
++ long that watchdog timers expire. */
++ cond_resched();
++
++ /* get the block to scan in the correct order */
++ blk = block_index[block_iter].block;
++ bi = yaffs_get_block_info(dev, blk);
++ deleted = 0;
++
++ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
++
++ /* For each chunk in each block that needs scanning.... */
++ found_chunks = 0;
++ if (summary_available)
++ c = dev->chunks_per_summary - 1;
++ else
++ c = dev->param.chunks_per_block - 1;
++
++ for (/* c is already initialised */;
++ !alloc_failed && c >= 0 &&
++ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
++ c--) {
++ /* Scan backwards...
++ * Read the tags and decide what to do
++ */
++ if (yaffs2_scan_chunk(dev, bi, blk, c,
++ &found_chunks, chunk_data,
++ &hard_list, summary_available) ==
++ YAFFS_FAIL)
++ alloc_failed = 1;
++ }
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ /* If we got this far while scanning, then the block
++ * is fully allocated. */
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++ yaffs_block_became_dirty(dev, blk);
++ }
++ }
++
++ yaffs_skip_rest_of_block(dev);
++
++ if (alt_block_index)
++ vfree(block_index);
++ else
++ kfree(block_index);
++
++ /* OK, we've done all the scanning.
++ * We have scanned all the objects, so now it's
++ * time to fix up the hard link chains.
++ */
++ yaffs_link_fixup(dev, &hard_list);
++
++ yaffs_release_temp_buffer(dev, chunk_data);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
++
++ return YAFFS_OK;
++}
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs2.h linux-3.15-rc5/fs/yaffs2/yaffs_yaffs2.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yaffs_yaffs2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yaffs_yaffs2.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS2_H__
++#define __YAFFS_YAFFS2_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi);
++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
++ struct yaffs_block_info *bi);
++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
++int yaffs2_checkpt_required(struct yaffs_dev *dev);
++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
++
++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
++int yaffs2_checkpt_save(struct yaffs_dev *dev);
++int yaffs2_checkpt_restore(struct yaffs_dev *dev);
++
++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
++int yaffs2_scan_backwards(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.15-rc5.orig/fs/yaffs2/yportenv.h linux-3.15-rc5/fs/yaffs2/yportenv.h
+--- linux-3.15-rc5.orig/fs/yaffs2/yportenv.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.15-rc5/fs/yaffs2/yportenv.h 2014-05-17 01:53:27.000000000 +0200
+@@ -0,0 +1,85 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YPORTENV_H__
++#define __YPORTENV_H__
++
++/*
++ * Define the MTD version in terms of Linux Kernel versions
++ * This allows yaffs to be used independently of the kernel
++ * as well as with it.
++ */
++
++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
++
++#ifdef YAFFS_OUT_OF_TREE
++#include "moduleconfig.h"
++#endif
++
++#include <linux/version.h>
++#define MTD_VERSION_CODE LINUX_VERSION_CODE
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/xattr.h>
++#include <linux/list.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/sort.h>
++#include <linux/bitops.h>
++
++/* These type wrappings are used to support Unicode names in WinCE. */
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x) x
++
++#define YAFFS_LOSTNFOUND_NAME "lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX "obj"
++
++
++#define YAFFS_ROOT_MODE 0755
++#define YAFFS_LOSTNFOUND_MODE 0700
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
++#define Y_TIME_CONVERT(x) (x).tv_sec
++#else
++#define Y_CURRENT_TIME CURRENT_TIME
++#define Y_TIME_CONVERT(x) (x)
++#endif
++
++#define compile_time_assertion(assertion) \
++ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
++
++
++#define yaffs_printf(msk, fmt, ...) \
++ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__)
++
++#define yaffs_trace(msk, fmt, ...) do { \
++ if (yaffs_trace_mask & (msk)) \
++ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
++} while (0)
++
++
++#endif
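
yaffs_trace() above prints only when the global trace mask selects the message's category. The same bitmask-filtered logging pattern as a standalone program, with invented category bits and a plain printf in place of printk:

#include <stdio.h>

/* Invented trace categories; yaffs defines its own YAFFS_TRACE_* bits. */
#define TRACE_SCAN		(1u << 0)
#define TRACE_CHECKPOINT	(1u << 1)

static unsigned trace_mask = TRACE_CHECKPOINT;	/* runtime-selectable */

#define trace(msk, fmt, ...) do { \
	if (trace_mask & (msk)) \
		printf("trace: " fmt "\n", ##__VA_ARGS__); \
} while (0)

int main(void)
{
	trace(TRACE_SCAN, "scanning block %d", 7);	/* filtered out */
	trace(TRACE_CHECKPOINT, "checkpoint ok");	/* printed */
	return 0;
}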
diff --git a/target/linux/patches/3.18.9/ppc64-missing-zlib.patch b/target/linux/patches/3.18.9/ppc64-missing-zlib.patch
new file mode 100644
index 000000000..c6e0616be
--- /dev/null
+++ b/target/linux/patches/3.18.9/ppc64-missing-zlib.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.5.orig/arch/powerpc/platforms/pseries/Kconfig linux-3.11.5/arch/powerpc/platforms/pseries/Kconfig
+--- linux-3.11.5.orig/arch/powerpc/platforms/pseries/Kconfig 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/arch/powerpc/platforms/pseries/Kconfig 2013-11-01 15:23:09.000000000 +0100
+@@ -17,6 +17,7 @@
+ select PPC_NATIVE
+ select PPC_PCI_CHOICE if EXPERT
+ select ZLIB_DEFLATE
++ select ZLIB_INFLATE
+ select PPC_DOORBELL
+ select HAVE_CONTEXT_TRACKING
+ select HOTPLUG_CPU if SMP
diff --git a/target/linux/patches/3.18.9/regmap-bool.patch b/target/linux/patches/3.18.9/regmap-bool.patch
new file mode 100644
index 000000000..5c0ff5e2c
--- /dev/null
+++ b/target/linux/patches/3.18.9/regmap-bool.patch
@@ -0,0 +1,27 @@
+diff -Nur linux-3.18.5.orig/drivers/base/regmap/Kconfig linux-3.18.5/drivers/base/regmap/Kconfig
+--- linux-3.18.5.orig/drivers/base/regmap/Kconfig 2015-01-30 02:41:03.000000000 +0100
++++ linux-3.18.5/drivers/base/regmap/Kconfig 2015-02-02 11:53:27.854106073 +0100
+@@ -10,19 +10,19 @@
+ bool
+
+ config REGMAP_I2C
+- tristate
++ bool
+ depends on I2C
+
+ config REGMAP_SPI
+- tristate
++ bool
+ depends on SPI
+
+ config REGMAP_SPMI
+- tristate
++ bool
+ depends on SPMI
+
+ config REGMAP_MMIO
+- tristate
++ bool
+
+ config REGMAP_IRQ
+ bool
diff --git a/target/linux/patches/3.18.9/relocs.patch b/target/linux/patches/3.18.9/relocs.patch
new file mode 100644
index 000000000..69a7c88a9
--- /dev/null
+++ b/target/linux/patches/3.18.9/relocs.patch
@@ -0,0 +1,2709 @@
+diff -Nur linux-3.13.6.orig/arch/x86/tools/relocs.c linux-3.13.6/arch/x86/tools/relocs.c
+--- linux-3.13.6.orig/arch/x86/tools/relocs.c 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/arch/x86/tools/relocs.c 2014-03-15 19:39:45.000000000 +0100
+@@ -126,6 +126,7 @@
+
+ if (err) {
+ regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
++ printf("foo: %s\n", sym_regex[i]);
+ die("%s", errbuf);
+ }
+ }
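
The added printf above reports the offending pattern before die(); the surrounding code is the standard POSIX regcomp()/regerror() error path. A self-contained example of that path, using a deliberately malformed pattern to force the error branch:

#include <regex.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	regex_t re;
	char errbuf[128];
	const char *pattern = "([invalid";	/* malformed on purpose */
	int err = regcomp(&re, pattern, REG_EXTENDED);

	if (err) {
		regerror(err, &re, errbuf, sizeof(errbuf));
		/* Same shape as the patched relocs.c: show the pattern,
		 * then the library's explanation of the failure. */
		printf("pattern: %s\n", pattern);
		fprintf(stderr, "regcomp: %s\n", errbuf);
		exit(1);
	}
	regfree(&re);
	return 0;
}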
+diff -Nur linux-3.13.6.orig/arch/x86/tools/relocs.h linux-3.13.6/arch/x86/tools/relocs.h
+--- linux-3.13.6.orig/arch/x86/tools/relocs.h 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/arch/x86/tools/relocs.h 2014-03-15 18:48:40.000000000 +0100
+@@ -9,11 +9,19 @@
+ #include <string.h>
+ #include <errno.h>
+ #include <unistd.h>
++#ifdef __linux__
+ #include <elf.h>
+ #include <byteswap.h>
+ #define USE_BSD
+ #include <endian.h>
++#else
++#include "elf.h"
++#endif
++#ifdef __APPLE__
++#include <pcreposix.h>
++#else
+ #include <regex.h>
++#endif
+ #include <tools/le_byteshift.h>
+
+ void die(char *fmt, ...);
+diff -Nur linux-3.13.6.orig/tools/include/elf.h linux-3.13.6/tools/include/elf.h
+--- linux-3.13.6.orig/tools/include/elf.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.6/tools/include/elf.h 2014-03-15 18:47:36.000000000 +0100
+@@ -0,0 +1,2671 @@
++#ifndef _ELF_H
++#define _ELF_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <stdint.h>
++
++typedef uint16_t Elf32_Half;
++typedef uint16_t Elf64_Half;
++
++typedef uint32_t Elf32_Word;
++typedef int32_t Elf32_Sword;
++typedef uint32_t Elf64_Word;
++typedef int32_t Elf64_Sword;
++
++typedef uint64_t Elf32_Xword;
++typedef int64_t Elf32_Sxword;
++typedef uint64_t Elf64_Xword;
++typedef int64_t Elf64_Sxword;
++
++typedef uint32_t Elf32_Addr;
++typedef uint64_t Elf64_Addr;
++
++typedef uint32_t Elf32_Off;
++typedef uint64_t Elf64_Off;
++
++typedef uint16_t Elf32_Section;
++typedef uint16_t Elf64_Section;
++
++typedef Elf32_Half Elf32_Versym;
++typedef Elf64_Half Elf64_Versym;
++
++#define EI_NIDENT (16)
++
++typedef struct {
++ unsigned char e_ident[EI_NIDENT];
++ Elf32_Half e_type;
++ Elf32_Half e_machine;
++ Elf32_Word e_version;
++ Elf32_Addr e_entry;
++ Elf32_Off e_phoff;
++ Elf32_Off e_shoff;
++ Elf32_Word e_flags;
++ Elf32_Half e_ehsize;
++ Elf32_Half e_phentsize;
++ Elf32_Half e_phnum;
++ Elf32_Half e_shentsize;
++ Elf32_Half e_shnum;
++ Elf32_Half e_shstrndx;
++} Elf32_Ehdr;
++
++typedef struct {
++ unsigned char e_ident[EI_NIDENT];
++ Elf64_Half e_type;
++ Elf64_Half e_machine;
++ Elf64_Word e_version;
++ Elf64_Addr e_entry;
++ Elf64_Off e_phoff;
++ Elf64_Off e_shoff;
++ Elf64_Word e_flags;
++ Elf64_Half e_ehsize;
++ Elf64_Half e_phentsize;
++ Elf64_Half e_phnum;
++ Elf64_Half e_shentsize;
++ Elf64_Half e_shnum;
++ Elf64_Half e_shstrndx;
++} Elf64_Ehdr;
++
++#define EI_MAG0 0
++#define ELFMAG0 0x7f
++
++#define EI_MAG1 1
++#define ELFMAG1 'E'
++
++#define EI_MAG2 2
++#define ELFMAG2 'L'
++
++#define EI_MAG3 3
++#define ELFMAG3 'F'
++
++
++#define ELFMAG "\177ELF"
++#define SELFMAG 4
++
++#define EI_CLASS 4
++#define ELFCLASSNONE 0
++#define ELFCLASS32 1
++#define ELFCLASS64 2
++#define ELFCLASSNUM 3
++
++#define EI_DATA 5
++#define ELFDATANONE 0
++#define ELFDATA2LSB 1
++#define ELFDATA2MSB 2
++#define ELFDATANUM 3
++
++#define EI_VERSION 6
++
++
++#define EI_OSABI 7
++#define ELFOSABI_NONE 0
++#define ELFOSABI_SYSV 0
++#define ELFOSABI_HPUX 1
++#define ELFOSABI_NETBSD 2
++#define ELFOSABI_LINUX 3
++#define ELFOSABI_GNU 3
++#define ELFOSABI_SOLARIS 6
++#define ELFOSABI_AIX 7
++#define ELFOSABI_IRIX 8
++#define ELFOSABI_FREEBSD 9
++#define ELFOSABI_TRU64 10
++#define ELFOSABI_MODESTO 11
++#define ELFOSABI_OPENBSD 12
++#define ELFOSABI_ARM 97
++#define ELFOSABI_STANDALONE 255
++
++#define EI_ABIVERSION 8
++
++#define EI_PAD 9
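
With just the e_ident indices defined above, a file's ELF class and byte order can be read from its first 16 bytes. A small sketch of such a check; the constants are duplicated so it compiles standalone (values match the header above), and error handling is kept minimal:

#include <stdio.h>
#include <string.h>

/* Duplicated from the definitions above so this compiles on its own. */
#define EI_NIDENT	16
#define ELFMAG		"\177ELF"
#define SELFMAG		4
#define EI_CLASS	4
#define ELFCLASS64	2
#define EI_DATA		5
#define ELFDATA2MSB	2

int main(int argc, char **argv)
{
	unsigned char ident[EI_NIDENT];
	FILE *f;

	if (argc != 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(ident, 1, EI_NIDENT, f) != EI_NIDENT ||
	    memcmp(ident, ELFMAG, SELFMAG) != 0) {
		fprintf(stderr, "%s: not an ELF file\n", argv[1]);
		fclose(f);
		return 1;
	}
	printf("%s: ELF%d, %s-endian\n", argv[1],
	       ident[EI_CLASS] == ELFCLASS64 ? 64 : 32,
	       ident[EI_DATA] == ELFDATA2MSB ? "big" : "little");
	fclose(f);
	return 0;
}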
++
++
++
++#define ET_NONE 0
++#define ET_REL 1
++#define ET_EXEC 2
++#define ET_DYN 3
++#define ET_CORE 4
++#define ET_NUM 5
++#define ET_LOOS 0xfe00
++#define ET_HIOS 0xfeff
++#define ET_LOPROC 0xff00
++#define ET_HIPROC 0xffff
++
++
++
++#define EM_NONE 0
++#define EM_M32 1
++#define EM_SPARC 2
++#define EM_386 3
++#define EM_68K 4
++#define EM_88K 5
++#define EM_860 7
++#define EM_MIPS 8
++#define EM_S370 9
++#define EM_MIPS_RS3_LE 10
++
++#define EM_PARISC 15
++#define EM_VPP500 17
++#define EM_SPARC32PLUS 18
++#define EM_960 19
++#define EM_PPC 20
++#define EM_PPC64 21
++#define EM_S390 22
++
++#define EM_V800 36
++#define EM_FR20 37
++#define EM_RH32 38
++#define EM_RCE 39
++#define EM_ARM 40
++#define EM_FAKE_ALPHA 41
++#define EM_SH 42
++#define EM_SPARCV9 43
++#define EM_TRICORE 44
++#define EM_ARC 45
++#define EM_H8_300 46
++#define EM_H8_300H 47
++#define EM_H8S 48
++#define EM_H8_500 49
++#define EM_IA_64 50
++#define EM_MIPS_X 51
++#define EM_COLDFIRE 52
++#define EM_68HC12 53
++#define EM_MMA 54
++#define EM_PCP 55
++#define EM_NCPU 56
++#define EM_NDR1 57
++#define EM_STARCORE 58
++#define EM_ME16 59
++#define EM_ST100 60
++#define EM_TINYJ 61
++#define EM_X86_64 62
++#define EM_PDSP 63
++
++#define EM_FX66 66
++#define EM_ST9PLUS 67
++#define EM_ST7 68
++#define EM_68HC16 69
++#define EM_68HC11 70
++#define EM_68HC08 71
++#define EM_68HC05 72
++#define EM_SVX 73
++#define EM_ST19 74
++#define EM_VAX 75
++#define EM_CRIS 76
++#define EM_JAVELIN 77
++#define EM_FIREPATH 78
++#define EM_ZSP 79
++#define EM_MMIX 80
++#define EM_HUANY 81
++#define EM_PRISM 82
++#define EM_AVR 83
++#define EM_FR30 84
++#define EM_D10V 85
++#define EM_D30V 86
++#define EM_V850 87
++#define EM_M32R 88
++#define EM_MN10300 89
++#define EM_MN10200 90
++#define EM_PJ 91
++#define EM_OPENRISC 92
++#define EM_ARC_A5 93
++#define EM_XTENSA 94
++#define EM_AARCH64 183
++#define EM_TILEPRO 188
++#define EM_MICROBLAZE 189
++#define EM_TILEGX 191
++#define EM_NUM 192
++#define EM_ALPHA 0x9026
++
++#define EV_NONE 0
++#define EV_CURRENT 1
++#define EV_NUM 2
++
++typedef struct {
++ Elf32_Word sh_name;
++ Elf32_Word sh_type;
++ Elf32_Word sh_flags;
++ Elf32_Addr sh_addr;
++ Elf32_Off sh_offset;
++ Elf32_Word sh_size;
++ Elf32_Word sh_link;
++ Elf32_Word sh_info;
++ Elf32_Word sh_addralign;
++ Elf32_Word sh_entsize;
++} Elf32_Shdr;
++
++typedef struct {
++ Elf64_Word sh_name;
++ Elf64_Word sh_type;
++ Elf64_Xword sh_flags;
++ Elf64_Addr sh_addr;
++ Elf64_Off sh_offset;
++ Elf64_Xword sh_size;
++ Elf64_Word sh_link;
++ Elf64_Word sh_info;
++ Elf64_Xword sh_addralign;
++ Elf64_Xword sh_entsize;
++} Elf64_Shdr;
++
++
++
++#define SHN_UNDEF 0
++#define SHN_LORESERVE 0xff00
++#define SHN_LOPROC 0xff00
++#define SHN_BEFORE 0xff00
++
++#define SHN_AFTER 0xff01
++
++#define SHN_HIPROC 0xff1f
++#define SHN_LOOS 0xff20
++#define SHN_HIOS 0xff3f
++#define SHN_ABS 0xfff1
++#define SHN_COMMON 0xfff2
++#define SHN_XINDEX 0xffff
++#define SHN_HIRESERVE 0xffff
++
++
++
++#define SHT_NULL 0
++#define SHT_PROGBITS 1
++#define SHT_SYMTAB 2
++#define SHT_STRTAB 3
++#define SHT_RELA 4
++#define SHT_HASH 5
++#define SHT_DYNAMIC 6
++#define SHT_NOTE 7
++#define SHT_NOBITS 8
++#define SHT_REL 9
++#define SHT_SHLIB 10
++#define SHT_DYNSYM 11
++#define SHT_INIT_ARRAY 14
++#define SHT_FINI_ARRAY 15
++#define SHT_PREINIT_ARRAY 16
++#define SHT_GROUP 17
++#define SHT_SYMTAB_SHNDX 18
++#define SHT_NUM 19
++#define SHT_LOOS 0x60000000
++#define SHT_GNU_ATTRIBUTES 0x6ffffff5
++#define SHT_GNU_HASH 0x6ffffff6
++#define SHT_GNU_LIBLIST 0x6ffffff7
++#define SHT_CHECKSUM 0x6ffffff8
++#define SHT_LOSUNW 0x6ffffffa
++#define SHT_SUNW_move 0x6ffffffa
++#define SHT_SUNW_COMDAT 0x6ffffffb
++#define SHT_SUNW_syminfo 0x6ffffffc
++#define SHT_GNU_verdef 0x6ffffffd
++#define SHT_GNU_verneed 0x6ffffffe
++#define SHT_GNU_versym 0x6fffffff
++#define SHT_HISUNW 0x6fffffff
++#define SHT_HIOS 0x6fffffff
++#define SHT_LOPROC 0x70000000
++#define SHT_HIPROC 0x7fffffff
++#define SHT_LOUSER 0x80000000
++#define SHT_HIUSER 0x8fffffff
++
++#define SHF_WRITE (1 << 0)
++#define SHF_ALLOC (1 << 1)
++#define SHF_EXECINSTR (1 << 2)
++#define SHF_MERGE (1 << 4)
++#define SHF_STRINGS (1 << 5)
++#define SHF_INFO_LINK (1 << 6)
++#define SHF_LINK_ORDER (1 << 7)
++#define SHF_OS_NONCONFORMING (1 << 8)
++
++#define SHF_GROUP (1 << 9)
++#define SHF_TLS (1 << 10)
++#define SHF_MASKOS 0x0ff00000
++#define SHF_MASKPROC 0xf0000000
++#define SHF_ORDERED (1 << 30)
++#define SHF_EXCLUDE (1 << 31)
++
++#define GRP_COMDAT 0x1
++
++typedef struct {
++ Elf32_Word st_name;
++ Elf32_Addr st_value;
++ Elf32_Word st_size;
++ unsigned char st_info;
++ unsigned char st_other;
++ Elf32_Section st_shndx;
++} Elf32_Sym;
++
++typedef struct {
++ Elf64_Word st_name;
++ unsigned char st_info;
++ unsigned char st_other;
++ Elf64_Section st_shndx;
++ Elf64_Addr st_value;
++ Elf64_Xword st_size;
++} Elf64_Sym;
++
++typedef struct {
++ Elf32_Half si_boundto;
++ Elf32_Half si_flags;
++} Elf32_Syminfo;
++
++typedef struct {
++ Elf64_Half si_boundto;
++ Elf64_Half si_flags;
++} Elf64_Syminfo;
++
++#define SYMINFO_BT_SELF 0xffff
++#define SYMINFO_BT_PARENT 0xfffe
++#define SYMINFO_BT_LOWRESERVE 0xff00
++
++#define SYMINFO_FLG_DIRECT 0x0001
++#define SYMINFO_FLG_PASSTHRU 0x0002
++#define SYMINFO_FLG_COPY 0x0004
++#define SYMINFO_FLG_LAZYLOAD 0x0008
++
++#define SYMINFO_NONE 0
++#define SYMINFO_CURRENT 1
++#define SYMINFO_NUM 2
++
++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4)
++#define ELF32_ST_TYPE(val) ((val) & 0xf)
++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
++
++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val)
++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val)
++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type))
++
++#define STB_LOCAL 0
++#define STB_GLOBAL 1
++#define STB_WEAK 2
++#define STB_NUM 3
++#define STB_LOOS 10
++#define STB_GNU_UNIQUE 10
++#define STB_HIOS 12
++#define STB_LOPROC 13
++#define STB_HIPROC 15
++
++#define STT_NOTYPE 0
++#define STT_OBJECT 1
++#define STT_FUNC 2
++#define STT_SECTION 3
++#define STT_FILE 4
++#define STT_COMMON 5
++#define STT_TLS 6
++#define STT_NUM 7
++#define STT_LOOS 10
++#define STT_GNU_IFUNC 10
++#define STT_HIOS 12
++#define STT_LOPROC 13
++#define STT_HIPROC 15
++
++#define STN_UNDEF 0
++
++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
++
++#define STV_DEFAULT 0
++#define STV_INTERNAL 1
++#define STV_HIDDEN 2
++#define STV_PROTECTED 3
++
++
++
++
++typedef struct
++{
++ Elf32_Addr r_offset;
++ Elf32_Word r_info;
++} Elf32_Rel;
++
++typedef struct {
++ Elf64_Addr r_offset;
++ Elf64_Xword r_info;
++} Elf64_Rel;
++
++
++
++typedef struct {
++ Elf32_Addr r_offset;
++ Elf32_Word r_info;
++ Elf32_Sword r_addend;
++} Elf32_Rela;
++
++typedef struct {
++ Elf64_Addr r_offset;
++ Elf64_Xword r_info;
++ Elf64_Sxword r_addend;
++} Elf64_Rela;
++
++
++
++#define ELF32_R_SYM(val) ((val) >> 8)
++#define ELF32_R_TYPE(val) ((val) & 0xff)
++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff))
++
++#define ELF64_R_SYM(i) ((i) >> 32)
++#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type))
++
++
++
++typedef struct {
++ Elf32_Word p_type;
++ Elf32_Off p_offset;
++ Elf32_Addr p_vaddr;
++ Elf32_Addr p_paddr;
++ Elf32_Word p_filesz;
++ Elf32_Word p_memsz;
++ Elf32_Word p_flags;
++ Elf32_Word p_align;
++} Elf32_Phdr;
++
++typedef struct {
++ Elf64_Word p_type;
++ Elf64_Word p_flags;
++ Elf64_Off p_offset;
++ Elf64_Addr p_vaddr;
++ Elf64_Addr p_paddr;
++ Elf64_Xword p_filesz;
++ Elf64_Xword p_memsz;
++ Elf64_Xword p_align;
++} Elf64_Phdr;
++
++
++
++#define PT_NULL 0
++#define PT_LOAD 1
++#define PT_DYNAMIC 2
++#define PT_INTERP 3
++#define PT_NOTE 4
++#define PT_SHLIB 5
++#define PT_PHDR 6
++#define PT_TLS 7
++#define PT_NUM 8
++#define PT_LOOS 0x60000000
++#define PT_GNU_EH_FRAME 0x6474e550
++#define PT_GNU_STACK 0x6474e551
++#define PT_GNU_RELRO 0x6474e552
++#define PT_LOSUNW 0x6ffffffa
++#define PT_SUNWBSS 0x6ffffffa
++#define PT_SUNWSTACK 0x6ffffffb
++#define PT_HISUNW 0x6fffffff
++#define PT_HIOS 0x6fffffff
++#define PT_LOPROC 0x70000000
++#define PT_HIPROC 0x7fffffff
++
++
++#define PN_XNUM 0xffff
++
++
++#define PF_X (1 << 0)
++#define PF_W (1 << 1)
++#define PF_R (1 << 2)
++#define PF_MASKOS 0x0ff00000
++#define PF_MASKPROC 0xf0000000
++
++
++
++#define NT_PRSTATUS 1
++#define NT_FPREGSET 2
++#define NT_PRPSINFO 3
++#define NT_PRXREG 4
++#define NT_TASKSTRUCT 4
++#define NT_PLATFORM 5
++#define NT_AUXV 6
++#define NT_GWINDOWS 7
++#define NT_ASRS 8
++#define NT_PSTATUS 10
++#define NT_PSINFO 13
++#define NT_PRCRED 14
++#define NT_UTSNAME 15
++#define NT_LWPSTATUS 16
++#define NT_LWPSINFO 17
++#define NT_PRFPXREG 20
++#define NT_SIGINFO 0x53494749
++#define NT_FILE 0x46494c45
++#define NT_PRXFPREG 0x46e62b7f
++#define NT_PPC_VMX 0x100
++#define NT_PPC_SPE 0x101
++#define NT_PPC_VSX 0x102
++#define NT_386_TLS 0x200
++#define NT_386_IOPERM 0x201
++#define NT_X86_XSTATE 0x202
++#define NT_S390_HIGH_GPRS 0x300
++#define NT_S390_TIMER 0x301
++#define NT_S390_TODCMP 0x302
++#define NT_S390_TODPREG 0x303
++#define NT_S390_CTRS 0x304
++#define NT_S390_PREFIX 0x305
++#define NT_S390_LAST_BREAK 0x306
++#define NT_S390_SYSTEM_CALL 0x307
++#define NT_S390_TDB 0x308
++#define NT_ARM_VFP 0x400
++#define NT_ARM_TLS 0x401
++#define NT_ARM_HW_BREAK 0x402
++#define NT_ARM_HW_WATCH 0x403
++#define NT_METAG_CBUF 0x500
++#define NT_METAG_RPIPE 0x501
++#define NT_METAG_TLS 0x502
++#define NT_VERSION 1
++
++
++
++
++typedef struct {
++ Elf32_Sword d_tag;
++ union {
++ Elf32_Word d_val;
++ Elf32_Addr d_ptr;
++ } d_un;
++} Elf32_Dyn;
++
++typedef struct {
++ Elf64_Sxword d_tag;
++ union {
++ Elf64_Xword d_val;
++ Elf64_Addr d_ptr;
++ } d_un;
++} Elf64_Dyn;
++
++
++
++#define DT_NULL 0
++#define DT_NEEDED 1
++#define DT_PLTRELSZ 2
++#define DT_PLTGOT 3
++#define DT_HASH 4
++#define DT_STRTAB 5
++#define DT_SYMTAB 6
++#define DT_RELA 7
++#define DT_RELASZ 8
++#define DT_RELAENT 9
++#define DT_STRSZ 10
++#define DT_SYMENT 11
++#define DT_INIT 12
++#define DT_FINI 13
++#define DT_SONAME 14
++#define DT_RPATH 15
++#define DT_SYMBOLIC 16
++#define DT_REL 17
++#define DT_RELSZ 18
++#define DT_RELENT 19
++#define DT_PLTREL 20
++#define DT_DEBUG 21
++#define DT_TEXTREL 22
++#define DT_JMPREL 23
++#define DT_BIND_NOW 24
++#define DT_INIT_ARRAY 25
++#define DT_FINI_ARRAY 26
++#define DT_INIT_ARRAYSZ 27
++#define DT_FINI_ARRAYSZ 28
++#define DT_RUNPATH 29
++#define DT_FLAGS 30
++#define DT_ENCODING 32
++#define DT_PREINIT_ARRAY 32
++#define DT_PREINIT_ARRAYSZ 33
++#define DT_NUM 34
++#define DT_LOOS 0x6000000d
++#define DT_HIOS 0x6ffff000
++#define DT_LOPROC 0x70000000
++#define DT_HIPROC 0x7fffffff
++#define DT_PROCNUM DT_MIPS_NUM
++
++#define DT_VALRNGLO 0x6ffffd00
++#define DT_GNU_PRELINKED 0x6ffffdf5
++#define DT_GNU_CONFLICTSZ 0x6ffffdf6
++#define DT_GNU_LIBLISTSZ 0x6ffffdf7
++#define DT_CHECKSUM 0x6ffffdf8
++#define DT_PLTPADSZ 0x6ffffdf9
++#define DT_MOVEENT 0x6ffffdfa
++#define DT_MOVESZ 0x6ffffdfb
++#define DT_FEATURE_1 0x6ffffdfc
++#define DT_POSFLAG_1 0x6ffffdfd
++
++#define DT_SYMINSZ 0x6ffffdfe
++#define DT_SYMINENT 0x6ffffdff
++#define DT_VALRNGHI 0x6ffffdff
++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag))
++#define DT_VALNUM 12
++
++#define DT_ADDRRNGLO 0x6ffffe00
++#define DT_GNU_HASH 0x6ffffef5
++#define DT_TLSDESC_PLT 0x6ffffef6
++#define DT_TLSDESC_GOT 0x6ffffef7
++#define DT_GNU_CONFLICT 0x6ffffef8
++#define DT_GNU_LIBLIST 0x6ffffef9
++#define DT_CONFIG 0x6ffffefa
++#define DT_DEPAUDIT 0x6ffffefb
++#define DT_AUDIT 0x6ffffefc
++#define DT_PLTPAD 0x6ffffefd
++#define DT_MOVETAB 0x6ffffefe
++#define DT_SYMINFO 0x6ffffeff
++#define DT_ADDRRNGHI 0x6ffffeff
++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag))
++#define DT_ADDRNUM 11
++
++
++
++#define DT_VERSYM 0x6ffffff0
++
++#define DT_RELACOUNT 0x6ffffff9
++#define DT_RELCOUNT 0x6ffffffa
++
++
++#define DT_FLAGS_1 0x6ffffffb
++#define DT_VERDEF 0x6ffffffc
++
++#define DT_VERDEFNUM 0x6ffffffd
++#define DT_VERNEED 0x6ffffffe
++
++#define DT_VERNEEDNUM 0x6fffffff
++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag))
++#define DT_VERSIONTAGNUM 16
++
++
++
++#define DT_AUXILIARY 0x7ffffffd
++#define DT_FILTER 0x7fffffff
++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1)
++#define DT_EXTRANUM 3
++
++
++#define DF_ORIGIN 0x00000001
++#define DF_SYMBOLIC 0x00000002
++#define DF_TEXTREL 0x00000004
++#define DF_BIND_NOW 0x00000008
++#define DF_STATIC_TLS 0x00000010
++
++
++
++#define DF_1_NOW 0x00000001
++#define DF_1_GLOBAL 0x00000002
++#define DF_1_GROUP 0x00000004
++#define DF_1_NODELETE 0x00000008
++#define DF_1_LOADFLTR 0x00000010
++#define DF_1_INITFIRST 0x00000020
++#define DF_1_NOOPEN 0x00000040
++#define DF_1_ORIGIN 0x00000080
++#define DF_1_DIRECT 0x00000100
++#define DF_1_TRANS 0x00000200
++#define DF_1_INTERPOSE 0x00000400
++#define DF_1_NODEFLIB 0x00000800
++#define DF_1_NODUMP 0x00001000
++#define DF_1_CONFALT 0x00002000
++#define DF_1_ENDFILTEE 0x00004000
++#define DF_1_DISPRELDNE 0x00008000
++#define DF_1_DISPRELPND 0x00010000
++#define DF_1_NODIRECT 0x00020000
++#define DF_1_IGNMULDEF 0x00040000
++#define DF_1_NOKSYMS 0x00080000
++#define DF_1_NOHDR 0x00100000
++#define DF_1_EDITED 0x00200000
++#define DF_1_NORELOC 0x00400000
++#define DF_1_SYMINTPOSE 0x00800000
++#define DF_1_GLOBAUDIT 0x01000000
++#define DF_1_SINGLETON 0x02000000
++
++#define DTF_1_PARINIT 0x00000001
++#define DTF_1_CONFEXP 0x00000002
++
++
++#define DF_P1_LAZYLOAD 0x00000001
++#define DF_P1_GROUPPERM 0x00000002
++
++
++
++
++typedef struct {
++ Elf32_Half vd_version;
++ Elf32_Half vd_flags;
++ Elf32_Half vd_ndx;
++ Elf32_Half vd_cnt;
++ Elf32_Word vd_hash;
++ Elf32_Word vd_aux;
++ Elf32_Word vd_next;
++} Elf32_Verdef;
++
++typedef struct {
++ Elf64_Half vd_version;
++ Elf64_Half vd_flags;
++ Elf64_Half vd_ndx;
++ Elf64_Half vd_cnt;
++ Elf64_Word vd_hash;
++ Elf64_Word vd_aux;
++ Elf64_Word vd_next;
++} Elf64_Verdef;
++
++
++
++#define VER_DEF_NONE 0
++#define VER_DEF_CURRENT 1
++#define VER_DEF_NUM 2
++
++
++#define VER_FLG_BASE 0x1
++#define VER_FLG_WEAK 0x2
++
++
++#define VER_NDX_LOCAL 0
++#define VER_NDX_GLOBAL 1
++#define VER_NDX_LORESERVE 0xff00
++#define VER_NDX_ELIMINATE 0xff01
++
++
++
++typedef struct {
++ Elf32_Word vda_name;
++ Elf32_Word vda_next;
++} Elf32_Verdaux;
++
++typedef struct {
++ Elf64_Word vda_name;
++ Elf64_Word vda_next;
++} Elf64_Verdaux;
++
++
++
++
++typedef struct {
++ Elf32_Half vn_version;
++ Elf32_Half vn_cnt;
++ Elf32_Word vn_file;
++ Elf32_Word vn_aux;
++ Elf32_Word vn_next;
++} Elf32_Verneed;
++
++typedef struct {
++ Elf64_Half vn_version;
++ Elf64_Half vn_cnt;
++ Elf64_Word vn_file;
++ Elf64_Word vn_aux;
++ Elf64_Word vn_next;
++} Elf64_Verneed;
++
++
++
++#define VER_NEED_NONE 0
++#define VER_NEED_CURRENT 1
++#define VER_NEED_NUM 2
++
++
++
++typedef struct {
++ Elf32_Word vna_hash;
++ Elf32_Half vna_flags;
++ Elf32_Half vna_other;
++ Elf32_Word vna_name;
++ Elf32_Word vna_next;
++} Elf32_Vernaux;
++
++typedef struct {
++ Elf64_Word vna_hash;
++ Elf64_Half vna_flags;
++ Elf64_Half vna_other;
++ Elf64_Word vna_name;
++ Elf64_Word vna_next;
++} Elf64_Vernaux;
++
++
++
++#define VER_FLG_WEAK 0x2
++
++
++
++typedef struct {
++ uint32_t a_type;
++ union {
++ uint32_t a_val;
++ } a_un;
++} Elf32_auxv_t;
++
++typedef struct {
++ uint64_t a_type;
++ union {
++ uint64_t a_val;
++ } a_un;
++} Elf64_auxv_t;
++
++
++
++#define AT_NULL 0
++#define AT_IGNORE 1
++#define AT_EXECFD 2
++#define AT_PHDR 3
++#define AT_PHENT 4
++#define AT_PHNUM 5
++#define AT_PAGESZ 6
++#define AT_BASE 7
++#define AT_FLAGS 8
++#define AT_ENTRY 9
++#define AT_NOTELF 10
++#define AT_UID 11
++#define AT_EUID 12
++#define AT_GID 13
++#define AT_EGID 14
++#define AT_CLKTCK 17
++
++
++#define AT_PLATFORM 15
++#define AT_HWCAP 16
++
++
++
++
++#define AT_FPUCW 18
++
++
++#define AT_DCACHEBSIZE 19
++#define AT_ICACHEBSIZE 20
++#define AT_UCACHEBSIZE 21
++
++
++
++#define AT_IGNOREPPC 22
++
++#define AT_SECURE 23
++
++#define AT_BASE_PLATFORM 24
++
++#define AT_RANDOM 25
++
++#define AT_HWCAP2 26
++
++#define AT_EXECFN 31
++
++
++
++#define AT_SYSINFO 32
++#define AT_SYSINFO_EHDR 33
++
++
++
++#define AT_L1I_CACHESHAPE 34
++#define AT_L1D_CACHESHAPE 35
++#define AT_L2_CACHESHAPE 36
++#define AT_L3_CACHESHAPE 37
++
++
++
++
++typedef struct {
++ Elf32_Word n_namesz;
++ Elf32_Word n_descsz;
++ Elf32_Word n_type;
++} Elf32_Nhdr;
++
++typedef struct {
++ Elf64_Word n_namesz;
++ Elf64_Word n_descsz;
++ Elf64_Word n_type;
++} Elf64_Nhdr;
++
++
++
++
++#define ELF_NOTE_SOLARIS "SUNW Solaris"
++
++
++#define ELF_NOTE_GNU "GNU"
++
++
++
++
++
++#define ELF_NOTE_PAGESIZE_HINT 1
++
++
++#define NT_GNU_ABI_TAG 1
++#define ELF_NOTE_ABI NT_GNU_ABI_TAG
++
++
++
++#define ELF_NOTE_OS_LINUX 0
++#define ELF_NOTE_OS_GNU 1
++#define ELF_NOTE_OS_SOLARIS2 2
++#define ELF_NOTE_OS_FREEBSD 3
++
++#define NT_GNU_BUILD_ID 3
++#define NT_GNU_GOLD_VERSION 4
++
++
++
++typedef struct {
++ Elf32_Xword m_value;
++ Elf32_Word m_info;
++ Elf32_Word m_poffset;
++ Elf32_Half m_repeat;
++ Elf32_Half m_stride;
++} Elf32_Move;
++
++typedef struct {
++ Elf64_Xword m_value;
++ Elf64_Xword m_info;
++ Elf64_Xword m_poffset;
++ Elf64_Half m_repeat;
++ Elf64_Half m_stride;
++} Elf64_Move;
++
++
++#define ELF32_M_SYM(info) ((info) >> 8)
++#define ELF32_M_SIZE(info) ((unsigned char) (info))
++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size))
++
++#define ELF64_M_SYM(info) ELF32_M_SYM (info)
++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info)
++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size)
++
++#define EF_CPU32 0x00810000
++
++#define R_68K_NONE 0
++#define R_68K_32 1
++#define R_68K_16 2
++#define R_68K_8 3
++#define R_68K_PC32 4
++#define R_68K_PC16 5
++#define R_68K_PC8 6
++#define R_68K_GOT32 7
++#define R_68K_GOT16 8
++#define R_68K_GOT8 9
++#define R_68K_GOT32O 10
++#define R_68K_GOT16O 11
++#define R_68K_GOT8O 12
++#define R_68K_PLT32 13
++#define R_68K_PLT16 14
++#define R_68K_PLT8 15
++#define R_68K_PLT32O 16
++#define R_68K_PLT16O 17
++#define R_68K_PLT8O 18
++#define R_68K_COPY 19
++#define R_68K_GLOB_DAT 20
++#define R_68K_JMP_SLOT 21
++#define R_68K_RELATIVE 22
++#define R_68K_NUM 23
++
++#define R_386_NONE 0
++#define R_386_32 1
++#define R_386_PC32 2
++#define R_386_GOT32 3
++#define R_386_PLT32 4
++#define R_386_COPY 5
++#define R_386_GLOB_DAT 6
++#define R_386_JMP_SLOT 7
++#define R_386_RELATIVE 8
++#define R_386_GOTOFF 9
++#define R_386_GOTPC 10
++#define R_386_32PLT 11
++#define R_386_TLS_TPOFF 14
++#define R_386_TLS_IE 15
++#define R_386_TLS_GOTIE 16
++#define R_386_TLS_LE 17
++#define R_386_TLS_GD 18
++#define R_386_TLS_LDM 19
++#define R_386_16 20
++#define R_386_PC16 21
++#define R_386_8 22
++#define R_386_PC8 23
++#define R_386_TLS_GD_32 24
++#define R_386_TLS_GD_PUSH 25
++#define R_386_TLS_GD_CALL 26
++#define R_386_TLS_GD_POP 27
++#define R_386_TLS_LDM_32 28
++#define R_386_TLS_LDM_PUSH 29
++#define R_386_TLS_LDM_CALL 30
++#define R_386_TLS_LDM_POP 31
++#define R_386_TLS_LDO_32 32
++#define R_386_TLS_IE_32 33
++#define R_386_TLS_LE_32 34
++#define R_386_TLS_DTPMOD32 35
++#define R_386_TLS_DTPOFF32 36
++#define R_386_TLS_TPOFF32 37
++#define R_386_SIZE32 38
++#define R_386_TLS_GOTDESC 39
++#define R_386_TLS_DESC_CALL 40
++#define R_386_TLS_DESC 41
++#define R_386_IRELATIVE 42
++#define R_386_NUM 43
++
++
++
++
++
++#define STT_SPARC_REGISTER 13
++
++
++
++#define EF_SPARCV9_MM 3
++#define EF_SPARCV9_TSO 0
++#define EF_SPARCV9_PSO 1
++#define EF_SPARCV9_RMO 2
++#define EF_SPARC_LEDATA 0x800000
++#define EF_SPARC_EXT_MASK 0xFFFF00
++#define EF_SPARC_32PLUS 0x000100
++#define EF_SPARC_SUN_US1 0x000200
++#define EF_SPARC_HAL_R1 0x000400
++#define EF_SPARC_SUN_US3 0x000800
++
++
++
++#define R_SPARC_NONE 0
++#define R_SPARC_8 1
++#define R_SPARC_16 2
++#define R_SPARC_32 3
++#define R_SPARC_DISP8 4
++#define R_SPARC_DISP16 5
++#define R_SPARC_DISP32 6
++#define R_SPARC_WDISP30 7
++#define R_SPARC_WDISP22 8
++#define R_SPARC_HI22 9
++#define R_SPARC_22 10
++#define R_SPARC_13 11
++#define R_SPARC_LO10 12
++#define R_SPARC_GOT10 13
++#define R_SPARC_GOT13 14
++#define R_SPARC_GOT22 15
++#define R_SPARC_PC10 16
++#define R_SPARC_PC22 17
++#define R_SPARC_WPLT30 18
++#define R_SPARC_COPY 19
++#define R_SPARC_GLOB_DAT 20
++#define R_SPARC_JMP_SLOT 21
++#define R_SPARC_RELATIVE 22
++#define R_SPARC_UA32 23
++
++
++
++#define R_SPARC_PLT32 24
++#define R_SPARC_HIPLT22 25
++#define R_SPARC_LOPLT10 26
++#define R_SPARC_PCPLT32 27
++#define R_SPARC_PCPLT22 28
++#define R_SPARC_PCPLT10 29
++#define R_SPARC_10 30
++#define R_SPARC_11 31
++#define R_SPARC_64 32
++#define R_SPARC_OLO10 33
++#define R_SPARC_HH22 34
++#define R_SPARC_HM10 35
++#define R_SPARC_LM22 36
++#define R_SPARC_PC_HH22 37
++#define R_SPARC_PC_HM10 38
++#define R_SPARC_PC_LM22 39
++#define R_SPARC_WDISP16 40
++#define R_SPARC_WDISP19 41
++#define R_SPARC_GLOB_JMP 42
++#define R_SPARC_7 43
++#define R_SPARC_5 44
++#define R_SPARC_6 45
++#define R_SPARC_DISP64 46
++#define R_SPARC_PLT64 47
++#define R_SPARC_HIX22 48
++#define R_SPARC_LOX10 49
++#define R_SPARC_H44 50
++#define R_SPARC_M44 51
++#define R_SPARC_L44 52
++#define R_SPARC_REGISTER 53
++#define R_SPARC_UA64 54
++#define R_SPARC_UA16 55
++#define R_SPARC_TLS_GD_HI22 56
++#define R_SPARC_TLS_GD_LO10 57
++#define R_SPARC_TLS_GD_ADD 58
++#define R_SPARC_TLS_GD_CALL 59
++#define R_SPARC_TLS_LDM_HI22 60
++#define R_SPARC_TLS_LDM_LO10 61
++#define R_SPARC_TLS_LDM_ADD 62
++#define R_SPARC_TLS_LDM_CALL 63
++#define R_SPARC_TLS_LDO_HIX22 64
++#define R_SPARC_TLS_LDO_LOX10 65
++#define R_SPARC_TLS_LDO_ADD 66
++#define R_SPARC_TLS_IE_HI22 67
++#define R_SPARC_TLS_IE_LO10 68
++#define R_SPARC_TLS_IE_LD 69
++#define R_SPARC_TLS_IE_LDX 70
++#define R_SPARC_TLS_IE_ADD 71
++#define R_SPARC_TLS_LE_HIX22 72
++#define R_SPARC_TLS_LE_LOX10 73
++#define R_SPARC_TLS_DTPMOD32 74
++#define R_SPARC_TLS_DTPMOD64 75
++#define R_SPARC_TLS_DTPOFF32 76
++#define R_SPARC_TLS_DTPOFF64 77
++#define R_SPARC_TLS_TPOFF32 78
++#define R_SPARC_TLS_TPOFF64 79
++#define R_SPARC_GOTDATA_HIX22 80
++#define R_SPARC_GOTDATA_LOX10 81
++#define R_SPARC_GOTDATA_OP_HIX22 82
++#define R_SPARC_GOTDATA_OP_LOX10 83
++#define R_SPARC_GOTDATA_OP 84
++#define R_SPARC_H34 85
++#define R_SPARC_SIZE32 86
++#define R_SPARC_SIZE64 87
++#define R_SPARC_GNU_VTINHERIT 250
++#define R_SPARC_GNU_VTENTRY 251
++#define R_SPARC_REV32 252
++
++#define R_SPARC_NUM 253
++
++
++
++#define DT_SPARC_REGISTER 0x70000001
++#define DT_SPARC_NUM 2
++
++
++#define EF_MIPS_NOREORDER 1
++#define EF_MIPS_PIC 2
++#define EF_MIPS_CPIC 4
++#define EF_MIPS_XGOT 8
++#define EF_MIPS_64BIT_WHIRL 16
++#define EF_MIPS_ABI2 32
++#define EF_MIPS_ABI_ON32 64
++#define EF_MIPS_ARCH 0xf0000000
++
++
++
++#define EF_MIPS_ARCH_1 0x00000000
++#define EF_MIPS_ARCH_2 0x10000000
++#define EF_MIPS_ARCH_3 0x20000000
++#define EF_MIPS_ARCH_4 0x30000000
++#define EF_MIPS_ARCH_5 0x40000000
++#define EF_MIPS_ARCH_32 0x50000000
++#define EF_MIPS_ARCH_64 0x60000000
++#define EF_MIPS_ARCH_32R2 0x70000000
++#define EF_MIPS_ARCH_64R2 0x80000000
++
++
++#define E_MIPS_ARCH_1 0x00000000
++#define E_MIPS_ARCH_2 0x10000000
++#define E_MIPS_ARCH_3 0x20000000
++#define E_MIPS_ARCH_4 0x30000000
++#define E_MIPS_ARCH_5 0x40000000
++#define E_MIPS_ARCH_32 0x50000000
++#define E_MIPS_ARCH_64 0x60000000
++
++
++
++#define SHN_MIPS_ACOMMON 0xff00
++#define SHN_MIPS_TEXT 0xff01
++#define SHN_MIPS_DATA 0xff02
++#define SHN_MIPS_SCOMMON 0xff03
++#define SHN_MIPS_SUNDEFINED 0xff04
++
++
++
++#define SHT_MIPS_LIBLIST 0x70000000
++#define SHT_MIPS_MSYM 0x70000001
++#define SHT_MIPS_CONFLICT 0x70000002
++#define SHT_MIPS_GPTAB 0x70000003
++#define SHT_MIPS_UCODE 0x70000004
++#define SHT_MIPS_DEBUG 0x70000005
++#define SHT_MIPS_REGINFO 0x70000006
++#define SHT_MIPS_PACKAGE 0x70000007
++#define SHT_MIPS_PACKSYM 0x70000008
++#define SHT_MIPS_RELD 0x70000009
++#define SHT_MIPS_IFACE 0x7000000b
++#define SHT_MIPS_CONTENT 0x7000000c
++#define SHT_MIPS_OPTIONS 0x7000000d
++#define SHT_MIPS_SHDR 0x70000010
++#define SHT_MIPS_FDESC 0x70000011
++#define SHT_MIPS_EXTSYM 0x70000012
++#define SHT_MIPS_DENSE 0x70000013
++#define SHT_MIPS_PDESC 0x70000014
++#define SHT_MIPS_LOCSYM 0x70000015
++#define SHT_MIPS_AUXSYM 0x70000016
++#define SHT_MIPS_OPTSYM 0x70000017
++#define SHT_MIPS_LOCSTR 0x70000018
++#define SHT_MIPS_LINE 0x70000019
++#define SHT_MIPS_RFDESC 0x7000001a
++#define SHT_MIPS_DELTASYM 0x7000001b
++#define SHT_MIPS_DELTAINST 0x7000001c
++#define SHT_MIPS_DELTACLASS 0x7000001d
++#define SHT_MIPS_DWARF 0x7000001e
++#define SHT_MIPS_DELTADECL 0x7000001f
++#define SHT_MIPS_SYMBOL_LIB 0x70000020
++#define SHT_MIPS_EVENTS 0x70000021
++#define SHT_MIPS_TRANSLATE 0x70000022
++#define SHT_MIPS_PIXIE 0x70000023
++#define SHT_MIPS_XLATE 0x70000024
++#define SHT_MIPS_XLATE_DEBUG 0x70000025
++#define SHT_MIPS_WHIRL 0x70000026
++#define SHT_MIPS_EH_REGION 0x70000027
++#define SHT_MIPS_XLATE_OLD 0x70000028
++#define SHT_MIPS_PDR_EXCEPTION 0x70000029
++
++
++
++#define SHF_MIPS_GPREL 0x10000000
++#define SHF_MIPS_MERGE 0x20000000
++#define SHF_MIPS_ADDR 0x40000000
++#define SHF_MIPS_STRINGS 0x80000000
++#define SHF_MIPS_NOSTRIP 0x08000000
++#define SHF_MIPS_LOCAL 0x04000000
++#define SHF_MIPS_NAMES 0x02000000
++#define SHF_MIPS_NODUPE 0x01000000
++
++
++
++
++
++#define STO_MIPS_DEFAULT 0x0
++#define STO_MIPS_INTERNAL 0x1
++#define STO_MIPS_HIDDEN 0x2
++#define STO_MIPS_PROTECTED 0x3
++#define STO_MIPS_PLT 0x8
++#define STO_MIPS_SC_ALIGN_UNUSED 0xff
++
++
++#define STB_MIPS_SPLIT_COMMON 13
++
++
++
++typedef union {
++ struct {
++ Elf32_Word gt_current_g_value;
++ Elf32_Word gt_unused;
++ } gt_header;
++ struct {
++ Elf32_Word gt_g_value;
++ Elf32_Word gt_bytes;
++ } gt_entry;
++} Elf32_gptab;
++
++
++
++typedef struct {
++ Elf32_Word ri_gprmask;
++ Elf32_Word ri_cprmask[4];
++ Elf32_Sword ri_gp_value;
++} Elf32_RegInfo;
++
++
++
++typedef struct {
++ unsigned char kind;
++
++ unsigned char size;
++ Elf32_Section section;
++
++ Elf32_Word info;
++} Elf_Options;
++
++
++
++#define ODK_NULL 0
++#define ODK_REGINFO 1
++#define ODK_EXCEPTIONS 2
++#define ODK_PAD 3
++#define ODK_HWPATCH 4
++#define ODK_FILL 5
++#define ODK_TAGS 6
++#define ODK_HWAND 7
++#define ODK_HWOR 8
++
++
++
++#define OEX_FPU_MIN 0x1f
++#define OEX_FPU_MAX 0x1f00
++#define OEX_PAGE0 0x10000
++#define OEX_SMM 0x20000
++#define OEX_FPDBUG 0x40000
++#define OEX_PRECISEFP OEX_FPDBUG
++#define OEX_DISMISS 0x80000
++
++#define OEX_FPU_INVAL 0x10
++#define OEX_FPU_DIV0 0x08
++#define OEX_FPU_OFLO 0x04
++#define OEX_FPU_UFLO 0x02
++#define OEX_FPU_INEX 0x01
++
++
++
++#define OHW_R4KEOP 0x1
++#define OHW_R8KPFETCH 0x2
++#define OHW_R5KEOP 0x4
++#define OHW_R5KCVTL 0x8
++
++#define OPAD_PREFIX 0x1
++#define OPAD_POSTFIX 0x2
++#define OPAD_SYMBOL 0x4
++
++
++
++typedef struct {
++ Elf32_Word hwp_flags1;
++ Elf32_Word hwp_flags2;
++} Elf_Options_Hw;
++
++
++
++#define OHWA0_R4KEOP_CHECKED 0x00000001
++#define OHWA1_R4KEOP_CLEAN 0x00000002
++
++
++
++#define R_MIPS_NONE 0
++#define R_MIPS_16 1
++#define R_MIPS_32 2
++#define R_MIPS_REL32 3
++#define R_MIPS_26 4
++#define R_MIPS_HI16 5
++#define R_MIPS_LO16 6
++#define R_MIPS_GPREL16 7
++#define R_MIPS_LITERAL 8
++#define R_MIPS_GOT16 9
++#define R_MIPS_PC16 10
++#define R_MIPS_CALL16 11
++#define R_MIPS_GPREL32 12
++
++#define R_MIPS_SHIFT5 16
++#define R_MIPS_SHIFT6 17
++#define R_MIPS_64 18
++#define R_MIPS_GOT_DISP 19
++#define R_MIPS_GOT_PAGE 20
++#define R_MIPS_GOT_OFST 21
++#define R_MIPS_GOT_HI16 22
++#define R_MIPS_GOT_LO16 23
++#define R_MIPS_SUB 24
++#define R_MIPS_INSERT_A 25
++#define R_MIPS_INSERT_B 26
++#define R_MIPS_DELETE 27
++#define R_MIPS_HIGHER 28
++#define R_MIPS_HIGHEST 29
++#define R_MIPS_CALL_HI16 30
++#define R_MIPS_CALL_LO16 31
++#define R_MIPS_SCN_DISP 32
++#define R_MIPS_REL16 33
++#define R_MIPS_ADD_IMMEDIATE 34
++#define R_MIPS_PJUMP 35
++#define R_MIPS_RELGOT 36
++#define R_MIPS_JALR 37
++#define R_MIPS_TLS_DTPMOD32 38
++#define R_MIPS_TLS_DTPREL32 39
++#define R_MIPS_TLS_DTPMOD64 40
++#define R_MIPS_TLS_DTPREL64 41
++#define R_MIPS_TLS_GD 42
++#define R_MIPS_TLS_LDM 43
++#define R_MIPS_TLS_DTPREL_HI16 44
++#define R_MIPS_TLS_DTPREL_LO16 45
++#define R_MIPS_TLS_GOTTPREL 46
++#define R_MIPS_TLS_TPREL32 47
++#define R_MIPS_TLS_TPREL64 48
++#define R_MIPS_TLS_TPREL_HI16 49
++#define R_MIPS_TLS_TPREL_LO16 50
++#define R_MIPS_GLOB_DAT 51
++#define R_MIPS_COPY 126
++#define R_MIPS_JUMP_SLOT 127
++
++#define R_MIPS_NUM 128
++
++
++
++#define PT_MIPS_REGINFO 0x70000000
++#define PT_MIPS_RTPROC 0x70000001
++#define PT_MIPS_OPTIONS 0x70000002
++
++
++
++#define PF_MIPS_LOCAL 0x10000000
++
++
++
++#define DT_MIPS_RLD_VERSION 0x70000001
++#define DT_MIPS_TIME_STAMP 0x70000002
++#define DT_MIPS_ICHECKSUM 0x70000003
++#define DT_MIPS_IVERSION 0x70000004
++#define DT_MIPS_FLAGS 0x70000005
++#define DT_MIPS_BASE_ADDRESS 0x70000006
++#define DT_MIPS_MSYM 0x70000007
++#define DT_MIPS_CONFLICT 0x70000008
++#define DT_MIPS_LIBLIST 0x70000009
++#define DT_MIPS_LOCAL_GOTNO 0x7000000a
++#define DT_MIPS_CONFLICTNO 0x7000000b
++#define DT_MIPS_LIBLISTNO 0x70000010
++#define DT_MIPS_SYMTABNO 0x70000011
++#define DT_MIPS_UNREFEXTNO 0x70000012
++#define DT_MIPS_GOTSYM 0x70000013
++#define DT_MIPS_HIPAGENO 0x70000014
++#define DT_MIPS_RLD_MAP 0x70000016
++#define DT_MIPS_DELTA_CLASS 0x70000017
++#define DT_MIPS_DELTA_CLASS_NO 0x70000018
++
++#define DT_MIPS_DELTA_INSTANCE 0x70000019
++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a
++
++#define DT_MIPS_DELTA_RELOC 0x7000001b
++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c
++
++#define DT_MIPS_DELTA_SYM 0x7000001d
++
++#define DT_MIPS_DELTA_SYM_NO 0x7000001e
++
++#define DT_MIPS_DELTA_CLASSSYM 0x70000020
++
++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021
++
++#define DT_MIPS_CXX_FLAGS 0x70000022
++#define DT_MIPS_PIXIE_INIT 0x70000023
++#define DT_MIPS_SYMBOL_LIB 0x70000024
++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025
++#define DT_MIPS_LOCAL_GOTIDX 0x70000026
++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027
++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028
++#define DT_MIPS_OPTIONS 0x70000029
++#define DT_MIPS_INTERFACE 0x7000002a
++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b
++#define DT_MIPS_INTERFACE_SIZE 0x7000002c
++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d
++
++#define DT_MIPS_PERF_SUFFIX 0x7000002e
++
++#define DT_MIPS_COMPACT_SIZE 0x7000002f
++#define DT_MIPS_GP_VALUE 0x70000030
++#define DT_MIPS_AUX_DYNAMIC 0x70000031
++
++#define DT_MIPS_PLTGOT 0x70000032
++
++#define DT_MIPS_RWPLT 0x70000034
++#define DT_MIPS_NUM 0x35
++
++
++
++#define RHF_NONE 0
++#define RHF_QUICKSTART (1 << 0)
++#define RHF_NOTPOT (1 << 1)
++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2)
++#define RHF_NO_MOVE (1 << 3)
++#define RHF_SGI_ONLY (1 << 4)
++#define RHF_GUARANTEE_INIT (1 << 5)
++#define RHF_DELTA_C_PLUS_PLUS (1 << 6)
++#define RHF_GUARANTEE_START_INIT (1 << 7)
++#define RHF_PIXIE (1 << 8)
++#define RHF_DEFAULT_DELAY_LOAD (1 << 9)
++#define RHF_REQUICKSTART (1 << 10)
++#define RHF_REQUICKSTARTED (1 << 11)
++#define RHF_CORD (1 << 12)
++#define RHF_NO_UNRES_UNDEF (1 << 13)
++#define RHF_RLD_ORDER_SAFE (1 << 14)
++
++
++
++typedef struct
++{
++ Elf32_Word l_name;
++ Elf32_Word l_time_stamp;
++ Elf32_Word l_checksum;
++ Elf32_Word l_version;
++ Elf32_Word l_flags;
++} Elf32_Lib;
++
++typedef struct
++{
++ Elf64_Word l_name;
++ Elf64_Word l_time_stamp;
++ Elf64_Word l_checksum;
++ Elf64_Word l_version;
++ Elf64_Word l_flags;
++} Elf64_Lib;
++
++
++
++
++#define LL_NONE 0
++#define LL_EXACT_MATCH (1 << 0)
++#define LL_IGNORE_INT_VER (1 << 1)
++#define LL_REQUIRE_MINOR (1 << 2)
++#define LL_EXPORTS (1 << 3)
++#define LL_DELAY_LOAD (1 << 4)
++#define LL_DELTA (1 << 5)
++
++
++
++typedef Elf32_Addr Elf32_Conflict;
++
++
++
++
++
++
++#define EF_PARISC_TRAPNIL 0x00010000
++#define EF_PARISC_EXT 0x00020000
++#define EF_PARISC_LSB 0x00040000
++#define EF_PARISC_WIDE 0x00080000
++#define EF_PARISC_NO_KABP 0x00100000
++
++#define EF_PARISC_LAZYSWAP 0x00400000
++#define EF_PARISC_ARCH 0x0000ffff
++
++
++
++#define EFA_PARISC_1_0 0x020b
++#define EFA_PARISC_1_1 0x0210
++#define EFA_PARISC_2_0 0x0214
++
++
++
++#define SHN_PARISC_ANSI_COMMON 0xff00
++
++#define SHN_PARISC_HUGE_COMMON 0xff01
++
++
++
++#define SHT_PARISC_EXT 0x70000000
++#define SHT_PARISC_UNWIND 0x70000001
++#define SHT_PARISC_DOC 0x70000002
++
++
++
++#define SHF_PARISC_SHORT 0x20000000
++#define SHF_PARISC_HUGE 0x40000000
++#define SHF_PARISC_SBP 0x80000000
++
++
++
++#define STT_PARISC_MILLICODE 13
++
++#define STT_HP_OPAQUE (STT_LOOS + 0x1)
++#define STT_HP_STUB (STT_LOOS + 0x2)
++
++
++
++#define R_PARISC_NONE 0
++#define R_PARISC_DIR32 1
++#define R_PARISC_DIR21L 2
++#define R_PARISC_DIR17R 3
++#define R_PARISC_DIR17F 4
++#define R_PARISC_DIR14R 6
++#define R_PARISC_PCREL32 9
++#define R_PARISC_PCREL21L 10
++#define R_PARISC_PCREL17R 11
++#define R_PARISC_PCREL17F 12
++#define R_PARISC_PCREL14R 14
++#define R_PARISC_DPREL21L 18
++#define R_PARISC_DPREL14R 22
++#define R_PARISC_GPREL21L 26
++#define R_PARISC_GPREL14R 30
++#define R_PARISC_LTOFF21L 34
++#define R_PARISC_LTOFF14R 38
++#define R_PARISC_SECREL32 41
++#define R_PARISC_SEGBASE 48
++#define R_PARISC_SEGREL32 49
++#define R_PARISC_PLTOFF21L 50
++#define R_PARISC_PLTOFF14R 54
++#define R_PARISC_LTOFF_FPTR32 57
++#define R_PARISC_LTOFF_FPTR21L 58
++#define R_PARISC_LTOFF_FPTR14R 62
++#define R_PARISC_FPTR64 64
++#define R_PARISC_PLABEL32 65
++#define R_PARISC_PLABEL21L 66
++#define R_PARISC_PLABEL14R 70
++#define R_PARISC_PCREL64 72
++#define R_PARISC_PCREL22F 74
++#define R_PARISC_PCREL14WR 75
++#define R_PARISC_PCREL14DR 76
++#define R_PARISC_PCREL16F 77
++#define R_PARISC_PCREL16WF 78
++#define R_PARISC_PCREL16DF 79
++#define R_PARISC_DIR64 80
++#define R_PARISC_DIR14WR 83
++#define R_PARISC_DIR14DR 84
++#define R_PARISC_DIR16F 85
++#define R_PARISC_DIR16WF 86
++#define R_PARISC_DIR16DF 87
++#define R_PARISC_GPREL64 88
++#define R_PARISC_GPREL14WR 91
++#define R_PARISC_GPREL14DR 92
++#define R_PARISC_GPREL16F 93
++#define R_PARISC_GPREL16WF 94
++#define R_PARISC_GPREL16DF 95
++#define R_PARISC_LTOFF64 96
++#define R_PARISC_LTOFF14WR 99
++#define R_PARISC_LTOFF14DR 100
++#define R_PARISC_LTOFF16F 101
++#define R_PARISC_LTOFF16WF 102
++#define R_PARISC_LTOFF16DF 103
++#define R_PARISC_SECREL64 104
++#define R_PARISC_SEGREL64 112
++#define R_PARISC_PLTOFF14WR 115
++#define R_PARISC_PLTOFF14DR 116
++#define R_PARISC_PLTOFF16F 117
++#define R_PARISC_PLTOFF16WF 118
++#define R_PARISC_PLTOFF16DF 119
++#define R_PARISC_LTOFF_FPTR64 120
++#define R_PARISC_LTOFF_FPTR14WR 123
++#define R_PARISC_LTOFF_FPTR14DR 124
++#define R_PARISC_LTOFF_FPTR16F 125
++#define R_PARISC_LTOFF_FPTR16WF 126
++#define R_PARISC_LTOFF_FPTR16DF 127
++#define R_PARISC_LORESERVE 128
++#define R_PARISC_COPY 128
++#define R_PARISC_IPLT 129
++#define R_PARISC_EPLT 130
++#define R_PARISC_TPREL32 153
++#define R_PARISC_TPREL21L 154
++#define R_PARISC_TPREL14R 158
++#define R_PARISC_LTOFF_TP21L 162
++#define R_PARISC_LTOFF_TP14R 166
++#define R_PARISC_LTOFF_TP14F 167
++#define R_PARISC_TPREL64 216
++#define R_PARISC_TPREL14WR 219
++#define R_PARISC_TPREL14DR 220
++#define R_PARISC_TPREL16F 221
++#define R_PARISC_TPREL16WF 222
++#define R_PARISC_TPREL16DF 223
++#define R_PARISC_LTOFF_TP64 224
++#define R_PARISC_LTOFF_TP14WR 227
++#define R_PARISC_LTOFF_TP14DR 228
++#define R_PARISC_LTOFF_TP16F 229
++#define R_PARISC_LTOFF_TP16WF 230
++#define R_PARISC_LTOFF_TP16DF 231
++#define R_PARISC_GNU_VTENTRY 232
++#define R_PARISC_GNU_VTINHERIT 233
++#define R_PARISC_TLS_GD21L 234
++#define R_PARISC_TLS_GD14R 235
++#define R_PARISC_TLS_GDCALL 236
++#define R_PARISC_TLS_LDM21L 237
++#define R_PARISC_TLS_LDM14R 238
++#define R_PARISC_TLS_LDMCALL 239
++#define R_PARISC_TLS_LDO21L 240
++#define R_PARISC_TLS_LDO14R 241
++#define R_PARISC_TLS_DTPMOD32 242
++#define R_PARISC_TLS_DTPMOD64 243
++#define R_PARISC_TLS_DTPOFF32 244
++#define R_PARISC_TLS_DTPOFF64 245
++#define R_PARISC_TLS_LE21L R_PARISC_TPREL21L
++#define R_PARISC_TLS_LE14R R_PARISC_TPREL14R
++#define R_PARISC_TLS_IE21L R_PARISC_LTOFF_TP21L
++#define R_PARISC_TLS_IE14R R_PARISC_LTOFF_TP14R
++#define R_PARISC_TLS_TPREL32 R_PARISC_TPREL32
++#define R_PARISC_TLS_TPREL64 R_PARISC_TPREL64
++#define R_PARISC_HIRESERVE 255
++
++
++
++#define PT_HP_TLS (PT_LOOS + 0x0)
++#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
++#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
++#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
++#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
++#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
++#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
++#define PT_HP_PARALLEL (PT_LOOS + 0x10)
++#define PT_HP_FASTBIND (PT_LOOS + 0x11)
++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
++#define PT_HP_STACK (PT_LOOS + 0x14)
++
++#define PT_PARISC_ARCHEXT 0x70000000
++#define PT_PARISC_UNWIND 0x70000001
++
++
++
++#define PF_PARISC_SBP 0x08000000
++
++#define PF_HP_PAGE_SIZE 0x00100000
++#define PF_HP_FAR_SHARED 0x00200000
++#define PF_HP_NEAR_SHARED 0x00400000
++#define PF_HP_CODE 0x01000000
++#define PF_HP_MODIFY 0x02000000
++#define PF_HP_LAZYSWAP 0x04000000
++#define PF_HP_SBP 0x08000000
++
++
++
++
++
++
++#define EF_ALPHA_32BIT 1
++#define EF_ALPHA_CANRELAX 2
++
++
++
++
++#define SHT_ALPHA_DEBUG 0x70000001
++#define SHT_ALPHA_REGINFO 0x70000002
++
++
++
++#define SHF_ALPHA_GPREL 0x10000000
++
++
++#define STO_ALPHA_NOPV 0x80
++#define STO_ALPHA_STD_GPLOAD 0x88
++
++
++
++#define R_ALPHA_NONE 0
++#define R_ALPHA_REFLONG 1
++#define R_ALPHA_REFQUAD 2
++#define R_ALPHA_GPREL32 3
++#define R_ALPHA_LITERAL 4
++#define R_ALPHA_LITUSE 5
++#define R_ALPHA_GPDISP 6
++#define R_ALPHA_BRADDR 7
++#define R_ALPHA_HINT 8
++#define R_ALPHA_SREL16 9
++#define R_ALPHA_SREL32 10
++#define R_ALPHA_SREL64 11
++#define R_ALPHA_GPRELHIGH 17
++#define R_ALPHA_GPRELLOW 18
++#define R_ALPHA_GPREL16 19
++#define R_ALPHA_COPY 24
++#define R_ALPHA_GLOB_DAT 25
++#define R_ALPHA_JMP_SLOT 26
++#define R_ALPHA_RELATIVE 27
++#define R_ALPHA_TLS_GD_HI 28
++#define R_ALPHA_TLSGD 29
++#define R_ALPHA_TLS_LDM 30
++#define R_ALPHA_DTPMOD64 31
++#define R_ALPHA_GOTDTPREL 32
++#define R_ALPHA_DTPREL64 33
++#define R_ALPHA_DTPRELHI 34
++#define R_ALPHA_DTPRELLO 35
++#define R_ALPHA_DTPREL16 36
++#define R_ALPHA_GOTTPREL 37
++#define R_ALPHA_TPREL64 38
++#define R_ALPHA_TPRELHI 39
++#define R_ALPHA_TPRELLO 40
++#define R_ALPHA_TPREL16 41
++
++#define R_ALPHA_NUM 46
++
++
++#define LITUSE_ALPHA_ADDR 0
++#define LITUSE_ALPHA_BASE 1
++#define LITUSE_ALPHA_BYTOFF 2
++#define LITUSE_ALPHA_JSR 3
++#define LITUSE_ALPHA_TLS_GD 4
++#define LITUSE_ALPHA_TLS_LDM 5
++
++
++#define DT_ALPHA_PLTRO (DT_LOPROC + 0)
++#define DT_ALPHA_NUM 1
++
++
++
++
++#define EF_PPC_EMB 0x80000000
++
++
++#define EF_PPC_RELOCATABLE 0x00010000
++#define EF_PPC_RELOCATABLE_LIB 0x00008000
++
++
++
++#define R_PPC_NONE 0
++#define R_PPC_ADDR32 1
++#define R_PPC_ADDR24 2
++#define R_PPC_ADDR16 3
++#define R_PPC_ADDR16_LO 4
++#define R_PPC_ADDR16_HI 5
++#define R_PPC_ADDR16_HA 6
++#define R_PPC_ADDR14 7
++#define R_PPC_ADDR14_BRTAKEN 8
++#define R_PPC_ADDR14_BRNTAKEN 9
++#define R_PPC_REL24 10
++#define R_PPC_REL14 11
++#define R_PPC_REL14_BRTAKEN 12
++#define R_PPC_REL14_BRNTAKEN 13
++#define R_PPC_GOT16 14
++#define R_PPC_GOT16_LO 15
++#define R_PPC_GOT16_HI 16
++#define R_PPC_GOT16_HA 17
++#define R_PPC_PLTREL24 18
++#define R_PPC_COPY 19
++#define R_PPC_GLOB_DAT 20
++#define R_PPC_JMP_SLOT 21
++#define R_PPC_RELATIVE 22
++#define R_PPC_LOCAL24PC 23
++#define R_PPC_UADDR32 24
++#define R_PPC_UADDR16 25
++#define R_PPC_REL32 26
++#define R_PPC_PLT32 27
++#define R_PPC_PLTREL32 28
++#define R_PPC_PLT16_LO 29
++#define R_PPC_PLT16_HI 30
++#define R_PPC_PLT16_HA 31
++#define R_PPC_SDAREL16 32
++#define R_PPC_SECTOFF 33
++#define R_PPC_SECTOFF_LO 34
++#define R_PPC_SECTOFF_HI 35
++#define R_PPC_SECTOFF_HA 36
++
++
++#define R_PPC_TLS 67
++#define R_PPC_DTPMOD32 68
++#define R_PPC_TPREL16 69
++#define R_PPC_TPREL16_LO 70
++#define R_PPC_TPREL16_HI 71
++#define R_PPC_TPREL16_HA 72
++#define R_PPC_TPREL32 73
++#define R_PPC_DTPREL16 74
++#define R_PPC_DTPREL16_LO 75
++#define R_PPC_DTPREL16_HI 76
++#define R_PPC_DTPREL16_HA 77
++#define R_PPC_DTPREL32 78
++#define R_PPC_GOT_TLSGD16 79
++#define R_PPC_GOT_TLSGD16_LO 80
++#define R_PPC_GOT_TLSGD16_HI 81
++#define R_PPC_GOT_TLSGD16_HA 82
++#define R_PPC_GOT_TLSLD16 83
++#define R_PPC_GOT_TLSLD16_LO 84
++#define R_PPC_GOT_TLSLD16_HI 85
++#define R_PPC_GOT_TLSLD16_HA 86
++#define R_PPC_GOT_TPREL16 87
++#define R_PPC_GOT_TPREL16_LO 88
++#define R_PPC_GOT_TPREL16_HI 89
++#define R_PPC_GOT_TPREL16_HA 90
++#define R_PPC_GOT_DTPREL16 91
++#define R_PPC_GOT_DTPREL16_LO 92
++#define R_PPC_GOT_DTPREL16_HI 93
++#define R_PPC_GOT_DTPREL16_HA 94
++
++
++
++#define R_PPC_EMB_NADDR32 101
++#define R_PPC_EMB_NADDR16 102
++#define R_PPC_EMB_NADDR16_LO 103
++#define R_PPC_EMB_NADDR16_HI 104
++#define R_PPC_EMB_NADDR16_HA 105
++#define R_PPC_EMB_SDAI16 106
++#define R_PPC_EMB_SDA2I16 107
++#define R_PPC_EMB_SDA2REL 108
++#define R_PPC_EMB_SDA21 109
++#define R_PPC_EMB_MRKREF 110
++#define R_PPC_EMB_RELSEC16 111
++#define R_PPC_EMB_RELST_LO 112
++#define R_PPC_EMB_RELST_HI 113
++#define R_PPC_EMB_RELST_HA 114
++#define R_PPC_EMB_BIT_FLD 115
++#define R_PPC_EMB_RELSDA 116
++
++
++#define R_PPC_DIAB_SDA21_LO 180
++#define R_PPC_DIAB_SDA21_HI 181
++#define R_PPC_DIAB_SDA21_HA 182
++#define R_PPC_DIAB_RELSDA_LO 183
++#define R_PPC_DIAB_RELSDA_HI 184
++#define R_PPC_DIAB_RELSDA_HA 185
++
++
++#define R_PPC_IRELATIVE 248
++
++
++#define R_PPC_REL16 249
++#define R_PPC_REL16_LO 250
++#define R_PPC_REL16_HI 251
++#define R_PPC_REL16_HA 252
++
++
++
++#define R_PPC_TOC16 255
++
++
++#define DT_PPC_GOT (DT_LOPROC + 0)
++#define DT_PPC_NUM 1
++
++
++#define R_PPC64_NONE R_PPC_NONE
++#define R_PPC64_ADDR32 R_PPC_ADDR32
++#define R_PPC64_ADDR24 R_PPC_ADDR24
++#define R_PPC64_ADDR16 R_PPC_ADDR16
++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO
++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI
++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA
++#define R_PPC64_ADDR14 R_PPC_ADDR14
++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
++#define R_PPC64_REL24 R_PPC_REL24
++#define R_PPC64_REL14 R_PPC_REL14
++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
++#define R_PPC64_GOT16 R_PPC_GOT16
++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
++
++#define R_PPC64_COPY R_PPC_COPY
++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
++#define R_PPC64_RELATIVE R_PPC_RELATIVE
++
++#define R_PPC64_UADDR32 R_PPC_UADDR32
++#define R_PPC64_UADDR16 R_PPC_UADDR16
++#define R_PPC64_REL32 R_PPC_REL32
++#define R_PPC64_PLT32 R_PPC_PLT32
++#define R_PPC64_PLTREL32 R_PPC_PLTREL32
++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
++
++#define R_PPC64_SECTOFF R_PPC_SECTOFF
++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
++#define R_PPC64_ADDR30 37
++#define R_PPC64_ADDR64 38
++#define R_PPC64_ADDR16_HIGHER 39
++#define R_PPC64_ADDR16_HIGHERA 40
++#define R_PPC64_ADDR16_HIGHEST 41
++#define R_PPC64_ADDR16_HIGHESTA 42
++#define R_PPC64_UADDR64 43
++#define R_PPC64_REL64 44
++#define R_PPC64_PLT64 45
++#define R_PPC64_PLTREL64 46
++#define R_PPC64_TOC16 47
++#define R_PPC64_TOC16_LO 48
++#define R_PPC64_TOC16_HI 49
++#define R_PPC64_TOC16_HA 50
++#define R_PPC64_TOC 51
++#define R_PPC64_PLTGOT16 52
++#define R_PPC64_PLTGOT16_LO 53
++#define R_PPC64_PLTGOT16_HI 54
++#define R_PPC64_PLTGOT16_HA 55
++
++#define R_PPC64_ADDR16_DS 56
++#define R_PPC64_ADDR16_LO_DS 57
++#define R_PPC64_GOT16_DS 58
++#define R_PPC64_GOT16_LO_DS 59
++#define R_PPC64_PLT16_LO_DS 60
++#define R_PPC64_SECTOFF_DS 61
++#define R_PPC64_SECTOFF_LO_DS 62
++#define R_PPC64_TOC16_DS 63
++#define R_PPC64_TOC16_LO_DS 64
++#define R_PPC64_PLTGOT16_DS 65
++#define R_PPC64_PLTGOT16_LO_DS 66
++
++
++#define R_PPC64_TLS 67
++#define R_PPC64_DTPMOD64 68
++#define R_PPC64_TPREL16 69
++#define R_PPC64_TPREL16_LO 70
++#define R_PPC64_TPREL16_HI 71
++#define R_PPC64_TPREL16_HA 72
++#define R_PPC64_TPREL64 73
++#define R_PPC64_DTPREL16 74
++#define R_PPC64_DTPREL16_LO 75
++#define R_PPC64_DTPREL16_HI 76
++#define R_PPC64_DTPREL16_HA 77
++#define R_PPC64_DTPREL64 78
++#define R_PPC64_GOT_TLSGD16 79
++#define R_PPC64_GOT_TLSGD16_LO 80
++#define R_PPC64_GOT_TLSGD16_HI 81
++#define R_PPC64_GOT_TLSGD16_HA 82
++#define R_PPC64_GOT_TLSLD16 83
++#define R_PPC64_GOT_TLSLD16_LO 84
++#define R_PPC64_GOT_TLSLD16_HI 85
++#define R_PPC64_GOT_TLSLD16_HA 86
++#define R_PPC64_GOT_TPREL16_DS 87
++#define R_PPC64_GOT_TPREL16_LO_DS 88
++#define R_PPC64_GOT_TPREL16_HI 89
++#define R_PPC64_GOT_TPREL16_HA 90
++#define R_PPC64_GOT_DTPREL16_DS 91
++#define R_PPC64_GOT_DTPREL16_LO_DS 92
++#define R_PPC64_GOT_DTPREL16_HI 93
++#define R_PPC64_GOT_DTPREL16_HA 94
++#define R_PPC64_TPREL16_DS 95
++#define R_PPC64_TPREL16_LO_DS 96
++#define R_PPC64_TPREL16_HIGHER 97
++#define R_PPC64_TPREL16_HIGHERA 98
++#define R_PPC64_TPREL16_HIGHEST 99
++#define R_PPC64_TPREL16_HIGHESTA 100
++#define R_PPC64_DTPREL16_DS 101
++#define R_PPC64_DTPREL16_LO_DS 102
++#define R_PPC64_DTPREL16_HIGHER 103
++#define R_PPC64_DTPREL16_HIGHERA 104
++#define R_PPC64_DTPREL16_HIGHEST 105
++#define R_PPC64_DTPREL16_HIGHESTA 106
++
++
++#define R_PPC64_JMP_IREL 247
++#define R_PPC64_IRELATIVE 248
++#define R_PPC64_REL16 249
++#define R_PPC64_REL16_LO 250
++#define R_PPC64_REL16_HI 251
++#define R_PPC64_REL16_HA 252
++
++
++#define DT_PPC64_GLINK (DT_LOPROC + 0)
++#define DT_PPC64_OPD (DT_LOPROC + 1)
++#define DT_PPC64_OPDSZ (DT_LOPROC + 2)
++#define DT_PPC64_NUM 3
++
++
++
++
++
++#define EF_ARM_RELEXEC 0x01
++#define EF_ARM_HASENTRY 0x02
++#define EF_ARM_INTERWORK 0x04
++#define EF_ARM_APCS_26 0x08
++#define EF_ARM_APCS_FLOAT 0x10
++#define EF_ARM_PIC 0x20
++#define EF_ARM_ALIGN8 0x40
++#define EF_ARM_NEW_ABI 0x80
++#define EF_ARM_OLD_ABI 0x100
++#define EF_ARM_SOFT_FLOAT 0x200
++#define EF_ARM_VFP_FLOAT 0x400
++#define EF_ARM_MAVERICK_FLOAT 0x800
++
++#define EF_ARM_ABI_FLOAT_SOFT 0x200
++#define EF_ARM_ABI_FLOAT_HARD 0x400
++
++
++#define EF_ARM_SYMSARESORTED 0x04
++#define EF_ARM_DYNSYMSUSESEGIDX 0x08
++#define EF_ARM_MAPSYMSFIRST 0x10
++#define EF_ARM_EABIMASK 0XFF000000
++
++
++#define EF_ARM_BE8 0x00800000
++#define EF_ARM_LE8 0x00400000
++
++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
++#define EF_ARM_EABI_UNKNOWN 0x00000000
++#define EF_ARM_EABI_VER1 0x01000000
++#define EF_ARM_EABI_VER2 0x02000000
++#define EF_ARM_EABI_VER3 0x03000000
++#define EF_ARM_EABI_VER4 0x04000000
++#define EF_ARM_EABI_VER5 0x05000000
++
++
++#define STT_ARM_TFUNC STT_LOPROC
++#define STT_ARM_16BIT STT_HIPROC
++
++
++#define SHF_ARM_ENTRYSECT 0x10000000
++#define SHF_ARM_COMDEF 0x80000000
++
++
++
++#define PF_ARM_SB 0x10000000
++
++#define PF_ARM_PI 0x20000000
++#define PF_ARM_ABS 0x40000000
++
++
++#define PT_ARM_EXIDX (PT_LOPROC + 1)
++
++
++#define SHT_ARM_EXIDX (SHT_LOPROC + 1)
++#define SHT_ARM_PREEMPTMAP (SHT_LOPROC + 2)
++#define SHT_ARM_ATTRIBUTES (SHT_LOPROC + 3)
++
++
++#define R_AARCH64_NONE 0
++#define R_AARCH64_ABS64 257
++#define R_AARCH64_ABS32 258
++#define R_AARCH64_COPY 1024
++#define R_AARCH64_GLOB_DAT 1025
++#define R_AARCH64_JUMP_SLOT 1026
++#define R_AARCH64_RELATIVE 1027
++#define R_AARCH64_TLS_DTPMOD64 1028
++#define R_AARCH64_TLS_DTPREL64 1029
++#define R_AARCH64_TLS_TPREL64 1030
++#define R_AARCH64_TLSDESC 1031
++
++
++#define R_ARM_NONE 0
++#define R_ARM_PC24 1
++#define R_ARM_ABS32 2
++#define R_ARM_REL32 3
++#define R_ARM_PC13 4
++#define R_ARM_ABS16 5
++#define R_ARM_ABS12 6
++#define R_ARM_THM_ABS5 7
++#define R_ARM_ABS8 8
++#define R_ARM_SBREL32 9
++#define R_ARM_THM_PC22 10
++#define R_ARM_THM_PC8 11
++#define R_ARM_AMP_VCALL9 12
++#define R_ARM_TLS_DESC 13
++#define R_ARM_THM_SWI8 14
++#define R_ARM_XPC25 15
++#define R_ARM_THM_XPC22 16
++#define R_ARM_TLS_DTPMOD32 17
++#define R_ARM_TLS_DTPOFF32 18
++#define R_ARM_TLS_TPOFF32 19
++#define R_ARM_COPY 20
++#define R_ARM_GLOB_DAT 21
++#define R_ARM_JUMP_SLOT 22
++#define R_ARM_RELATIVE 23
++#define R_ARM_GOTOFF 24
++#define R_ARM_GOTPC 25
++#define R_ARM_GOT32 26
++#define R_ARM_PLT32 27
++#define R_ARM_CALL 28
++#define R_ARM_JUMP24 29
++#define R_ARM_THM_JUMP24 30
++#define R_ARM_BASE_ABS 31
++#define R_ARM_ALU_PCREL_7_0 32
++#define R_ARM_ALU_PCREL_15_8 33
++#define R_ARM_ALU_PCREL_23_15 34
++#define R_ARM_LDR_SBREL_11_0 35
++#define R_ARM_ALU_SBREL_19_12 36
++#define R_ARM_ALU_SBREL_27_20 37
++#define R_ARM_TARGET1 38
++#define R_ARM_SBREL31 39
++#define R_ARM_V4BX 40
++#define R_ARM_TARGET2 41
++#define R_ARM_PREL31 42
++#define R_ARM_MOVW_ABS_NC 43
++#define R_ARM_MOVT_ABS 44
++#define R_ARM_MOVW_PREL_NC 45
++#define R_ARM_MOVT_PREL 46
++#define R_ARM_THM_MOVW_ABS_NC 47
++#define R_ARM_THM_MOVT_ABS 48
++#define R_ARM_THM_MOVW_PREL_NC 49
++#define R_ARM_THM_MOVT_PREL 50
++#define R_ARM_THM_JUMP19 51
++#define R_ARM_THM_JUMP6 52
++#define R_ARM_THM_ALU_PREL_11_0 53
++#define R_ARM_THM_PC12 54
++#define R_ARM_ABS32_NOI 55
++#define R_ARM_REL32_NOI 56
++#define R_ARM_ALU_PC_G0_NC 57
++#define R_ARM_ALU_PC_G0 58
++#define R_ARM_ALU_PC_G1_NC 59
++#define R_ARM_ALU_PC_G1 60
++#define R_ARM_ALU_PC_G2 61
++#define R_ARM_LDR_PC_G1 62
++#define R_ARM_LDR_PC_G2 63
++#define R_ARM_LDRS_PC_G0 64
++#define R_ARM_LDRS_PC_G1 65
++#define R_ARM_LDRS_PC_G2 66
++#define R_ARM_LDC_PC_G0 67
++#define R_ARM_LDC_PC_G1 68
++#define R_ARM_LDC_PC_G2 69
++#define R_ARM_ALU_SB_G0_NC 70
++#define R_ARM_ALU_SB_G0 71
++#define R_ARM_ALU_SB_G1_NC 72
++#define R_ARM_ALU_SB_G1 73
++#define R_ARM_ALU_SB_G2 74
++#define R_ARM_LDR_SB_G0 75
++#define R_ARM_LDR_SB_G1 76
++#define R_ARM_LDR_SB_G2 77
++#define R_ARM_LDRS_SB_G0 78
++#define R_ARM_LDRS_SB_G1 79
++#define R_ARM_LDRS_SB_G2 80
++#define R_ARM_LDC_SB_G0 81
++#define R_ARM_LDC_SB_G1 82
++#define R_ARM_LDC_SB_G2 83
++#define R_ARM_MOVW_BREL_NC 84
++#define R_ARM_MOVT_BREL 85
++#define R_ARM_MOVW_BREL 86
++#define R_ARM_THM_MOVW_BREL_NC 87
++#define R_ARM_THM_MOVT_BREL 88
++#define R_ARM_THM_MOVW_BREL 89
++#define R_ARM_TLS_GOTDESC 90
++#define R_ARM_TLS_CALL 91
++#define R_ARM_TLS_DESCSEQ 92
++#define R_ARM_THM_TLS_CALL 93
++#define R_ARM_PLT32_ABS 94
++#define R_ARM_GOT_ABS 95
++#define R_ARM_GOT_PREL 96
++#define R_ARM_GOT_BREL12 97
++#define R_ARM_GOTOFF12 98
++#define R_ARM_GOTRELAX 99
++#define R_ARM_GNU_VTENTRY 100
++#define R_ARM_GNU_VTINHERIT 101
++#define R_ARM_THM_PC11 102
++#define R_ARM_THM_PC9 103
++#define R_ARM_TLS_GD32 104
++
++#define R_ARM_TLS_LDM32 105
++
++#define R_ARM_TLS_LDO32 106
++
++#define R_ARM_TLS_IE32 107
++
++#define R_ARM_TLS_LE32 108
++#define R_ARM_TLS_LDO12 109
++#define R_ARM_TLS_LE12 110
++#define R_ARM_TLS_IE12GP 111
++#define R_ARM_ME_TOO 128
++#define R_ARM_THM_TLS_DESCSEQ 129
++#define R_ARM_THM_TLS_DESCSEQ16 129
++#define R_ARM_THM_TLS_DESCSEQ32 130
++#define R_ARM_THM_GOT_BREL12 131
++#define R_ARM_IRELATIVE 160
++#define R_ARM_RXPC25 249
++#define R_ARM_RSBREL32 250
++#define R_ARM_THM_RPC22 251
++#define R_ARM_RREL32 252
++#define R_ARM_RABS22 253
++#define R_ARM_RPC24 254
++#define R_ARM_RBASE 255
++
++#define R_ARM_NUM 256
++
++
++
++
++#define EF_IA_64_MASKOS 0x0000000f
++#define EF_IA_64_ABI64 0x00000010
++#define EF_IA_64_ARCH 0xff000000
++
++
++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0)
++#define PT_IA_64_UNWIND (PT_LOPROC + 1)
++#define PT_IA_64_HP_OPT_ANOT (PT_LOOS + 0x12)
++#define PT_IA_64_HP_HSL_ANOT (PT_LOOS + 0x13)
++#define PT_IA_64_HP_STACK (PT_LOOS + 0x14)
++
++
++#define PF_IA_64_NORECOV 0x80000000
++
++
++#define SHT_IA_64_EXT (SHT_LOPROC + 0)
++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1)
++
++
++#define SHF_IA_64_SHORT 0x10000000
++#define SHF_IA_64_NORECOV 0x20000000
++
++
++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
++#define DT_IA_64_NUM 1
++
++
++#define R_IA64_NONE 0x00
++#define R_IA64_IMM14 0x21
++#define R_IA64_IMM22 0x22
++#define R_IA64_IMM64 0x23
++#define R_IA64_DIR32MSB 0x24
++#define R_IA64_DIR32LSB 0x25
++#define R_IA64_DIR64MSB 0x26
++#define R_IA64_DIR64LSB 0x27
++#define R_IA64_GPREL22 0x2a
++#define R_IA64_GPREL64I 0x2b
++#define R_IA64_GPREL32MSB 0x2c
++#define R_IA64_GPREL32LSB 0x2d
++#define R_IA64_GPREL64MSB 0x2e
++#define R_IA64_GPREL64LSB 0x2f
++#define R_IA64_LTOFF22 0x32
++#define R_IA64_LTOFF64I 0x33
++#define R_IA64_PLTOFF22 0x3a
++#define R_IA64_PLTOFF64I 0x3b
++#define R_IA64_PLTOFF64MSB 0x3e
++#define R_IA64_PLTOFF64LSB 0x3f
++#define R_IA64_FPTR64I 0x43
++#define R_IA64_FPTR32MSB 0x44
++#define R_IA64_FPTR32LSB 0x45
++#define R_IA64_FPTR64MSB 0x46
++#define R_IA64_FPTR64LSB 0x47
++#define R_IA64_PCREL60B 0x48
++#define R_IA64_PCREL21B 0x49
++#define R_IA64_PCREL21M 0x4a
++#define R_IA64_PCREL21F 0x4b
++#define R_IA64_PCREL32MSB 0x4c
++#define R_IA64_PCREL32LSB 0x4d
++#define R_IA64_PCREL64MSB 0x4e
++#define R_IA64_PCREL64LSB 0x4f
++#define R_IA64_LTOFF_FPTR22 0x52
++#define R_IA64_LTOFF_FPTR64I 0x53
++#define R_IA64_LTOFF_FPTR32MSB 0x54
++#define R_IA64_LTOFF_FPTR32LSB 0x55
++#define R_IA64_LTOFF_FPTR64MSB 0x56
++#define R_IA64_LTOFF_FPTR64LSB 0x57
++#define R_IA64_SEGREL32MSB 0x5c
++#define R_IA64_SEGREL32LSB 0x5d
++#define R_IA64_SEGREL64MSB 0x5e
++#define R_IA64_SEGREL64LSB 0x5f
++#define R_IA64_SECREL32MSB 0x64
++#define R_IA64_SECREL32LSB 0x65
++#define R_IA64_SECREL64MSB 0x66
++#define R_IA64_SECREL64LSB 0x67
++#define R_IA64_REL32MSB 0x6c
++#define R_IA64_REL32LSB 0x6d
++#define R_IA64_REL64MSB 0x6e
++#define R_IA64_REL64LSB 0x6f
++#define R_IA64_LTV32MSB 0x74
++#define R_IA64_LTV32LSB 0x75
++#define R_IA64_LTV64MSB 0x76
++#define R_IA64_LTV64LSB 0x77
++#define R_IA64_PCREL21BI 0x79
++#define R_IA64_PCREL22 0x7a
++#define R_IA64_PCREL64I 0x7b
++#define R_IA64_IPLTMSB 0x80
++#define R_IA64_IPLTLSB 0x81
++#define R_IA64_COPY 0x84
++#define R_IA64_SUB 0x85
++#define R_IA64_LTOFF22X 0x86
++#define R_IA64_LDXMOV 0x87
++#define R_IA64_TPREL14 0x91
++#define R_IA64_TPREL22 0x92
++#define R_IA64_TPREL64I 0x93
++#define R_IA64_TPREL64MSB 0x96
++#define R_IA64_TPREL64LSB 0x97
++#define R_IA64_LTOFF_TPREL22 0x9a
++#define R_IA64_DTPMOD64MSB 0xa6
++#define R_IA64_DTPMOD64LSB 0xa7
++#define R_IA64_LTOFF_DTPMOD22 0xaa
++#define R_IA64_DTPREL14 0xb1
++#define R_IA64_DTPREL22 0xb2
++#define R_IA64_DTPREL64I 0xb3
++#define R_IA64_DTPREL32MSB 0xb4
++#define R_IA64_DTPREL32LSB 0xb5
++#define R_IA64_DTPREL64MSB 0xb6
++#define R_IA64_DTPREL64LSB 0xb7
++#define R_IA64_LTOFF_DTPREL22 0xba
++
++
++
++
++#define R_SH_NONE 0
++#define R_SH_DIR32 1
++#define R_SH_REL32 2
++#define R_SH_DIR8WPN 3
++#define R_SH_IND12W 4
++#define R_SH_DIR8WPL 5
++#define R_SH_DIR8WPZ 6
++#define R_SH_DIR8BP 7
++#define R_SH_DIR8W 8
++#define R_SH_DIR8L 9
++#define R_SH_SWITCH16 25
++#define R_SH_SWITCH32 26
++#define R_SH_USES 27
++#define R_SH_COUNT 28
++#define R_SH_ALIGN 29
++#define R_SH_CODE 30
++#define R_SH_DATA 31
++#define R_SH_LABEL 32
++#define R_SH_SWITCH8 33
++#define R_SH_GNU_VTINHERIT 34
++#define R_SH_GNU_VTENTRY 35
++#define R_SH_TLS_GD_32 144
++#define R_SH_TLS_LD_32 145
++#define R_SH_TLS_LDO_32 146
++#define R_SH_TLS_IE_32 147
++#define R_SH_TLS_LE_32 148
++#define R_SH_TLS_DTPMOD32 149
++#define R_SH_TLS_DTPOFF32 150
++#define R_SH_TLS_TPOFF32 151
++#define R_SH_GOT32 160
++#define R_SH_PLT32 161
++#define R_SH_COPY 162
++#define R_SH_GLOB_DAT 163
++#define R_SH_JMP_SLOT 164
++#define R_SH_RELATIVE 165
++#define R_SH_GOTOFF 166
++#define R_SH_GOTPC 167
++
++#define R_SH_NUM 256
++
++
++
++#define R_390_NONE 0
++#define R_390_8 1
++#define R_390_12 2
++#define R_390_16 3
++#define R_390_32 4
++#define R_390_PC32 5
++#define R_390_GOT12 6
++#define R_390_GOT32 7
++#define R_390_PLT32 8
++#define R_390_COPY 9
++#define R_390_GLOB_DAT 10
++#define R_390_JMP_SLOT 11
++#define R_390_RELATIVE 12
++#define R_390_GOTOFF32 13
++#define R_390_GOTPC 14
++#define R_390_GOT16 15
++#define R_390_PC16 16
++#define R_390_PC16DBL 17
++#define R_390_PLT16DBL 18
++#define R_390_PC32DBL 19
++#define R_390_PLT32DBL 20
++#define R_390_GOTPCDBL 21
++#define R_390_64 22
++#define R_390_PC64 23
++#define R_390_GOT64 24
++#define R_390_PLT64 25
++#define R_390_GOTENT 26
++#define R_390_GOTOFF16 27
++#define R_390_GOTOFF64 28
++#define R_390_GOTPLT12 29
++#define R_390_GOTPLT16 30
++#define R_390_GOTPLT32 31
++#define R_390_GOTPLT64 32
++#define R_390_GOTPLTENT 33
++#define R_390_PLTOFF16 34
++#define R_390_PLTOFF32 35
++#define R_390_PLTOFF64 36
++#define R_390_TLS_LOAD 37
++#define R_390_TLS_GDCALL 38
++
++#define R_390_TLS_LDCALL 39
++
++#define R_390_TLS_GD32 40
++
++#define R_390_TLS_GD64 41
++
++#define R_390_TLS_GOTIE12 42
++
++#define R_390_TLS_GOTIE32 43
++
++#define R_390_TLS_GOTIE64 44
++
++#define R_390_TLS_LDM32 45
++
++#define R_390_TLS_LDM64 46
++
++#define R_390_TLS_IE32 47
++
++#define R_390_TLS_IE64 48
++
++#define R_390_TLS_IEENT 49
++
++#define R_390_TLS_LE32 50
++
++#define R_390_TLS_LE64 51
++
++#define R_390_TLS_LDO32 52
++
++#define R_390_TLS_LDO64 53
++
++#define R_390_TLS_DTPMOD 54
++#define R_390_TLS_DTPOFF 55
++#define R_390_TLS_TPOFF 56
++
++#define R_390_20 57
++#define R_390_GOT20 58
++#define R_390_GOTPLT20 59
++#define R_390_TLS_GOTIE20 60
++
++
++#define R_390_NUM 61
++
++
++
++#define R_CRIS_NONE 0
++#define R_CRIS_8 1
++#define R_CRIS_16 2
++#define R_CRIS_32 3
++#define R_CRIS_8_PCREL 4
++#define R_CRIS_16_PCREL 5
++#define R_CRIS_32_PCREL 6
++#define R_CRIS_GNU_VTINHERIT 7
++#define R_CRIS_GNU_VTENTRY 8
++#define R_CRIS_COPY 9
++#define R_CRIS_GLOB_DAT 10
++#define R_CRIS_JUMP_SLOT 11
++#define R_CRIS_RELATIVE 12
++#define R_CRIS_16_GOT 13
++#define R_CRIS_32_GOT 14
++#define R_CRIS_16_GOTPLT 15
++#define R_CRIS_32_GOTPLT 16
++#define R_CRIS_32_GOTREL 17
++#define R_CRIS_32_PLT_GOTREL 18
++#define R_CRIS_32_PLT_PCREL 19
++
++#define R_CRIS_NUM 20
++
++
++
++#define R_X86_64_NONE 0
++#define R_X86_64_64 1
++#define R_X86_64_PC32 2
++#define R_X86_64_GOT32 3
++#define R_X86_64_PLT32 4
++#define R_X86_64_COPY 5
++#define R_X86_64_GLOB_DAT 6
++#define R_X86_64_JUMP_SLOT 7
++#define R_X86_64_RELATIVE 8
++#define R_X86_64_GOTPCREL 9
++
++#define R_X86_64_32 10
++#define R_X86_64_32S 11
++#define R_X86_64_16 12
++#define R_X86_64_PC16 13
++#define R_X86_64_8 14
++#define R_X86_64_PC8 15
++#define R_X86_64_DTPMOD64 16
++#define R_X86_64_DTPOFF64 17
++#define R_X86_64_TPOFF64 18
++#define R_X86_64_TLSGD 19
++
++#define R_X86_64_TLSLD 20
++
++#define R_X86_64_DTPOFF32 21
++#define R_X86_64_GOTTPOFF 22
++
++#define R_X86_64_TPOFF32 23
++#define R_X86_64_PC64 24
++#define R_X86_64_GOTOFF64 25
++#define R_X86_64_GOTPC32 26
++#define R_X86_64_GOT64 27
++#define R_X86_64_GOTPCREL64 28
++#define R_X86_64_GOTPC64 29
++#define R_X86_64_GOTPLT64 30
++#define R_X86_64_PLTOFF64 31
++#define R_X86_64_SIZE32 32
++#define R_X86_64_SIZE64 33
++
++#define R_X86_64_GOTPC32_TLSDESC 34
++#define R_X86_64_TLSDESC_CALL 35
++
++#define R_X86_64_TLSDESC 36
++#define R_X86_64_IRELATIVE 37
++#define R_X86_64_RELATIVE64 38
++#define R_X86_64_NUM 39
++
++
++
++#define R_MN10300_NONE 0
++#define R_MN10300_32 1
++#define R_MN10300_16 2
++#define R_MN10300_8 3
++#define R_MN10300_PCREL32 4
++#define R_MN10300_PCREL16 5
++#define R_MN10300_PCREL8 6
++#define R_MN10300_GNU_VTINHERIT 7
++#define R_MN10300_GNU_VTENTRY 8
++#define R_MN10300_24 9
++#define R_MN10300_GOTPC32 10
++#define R_MN10300_GOTPC16 11
++#define R_MN10300_GOTOFF32 12
++#define R_MN10300_GOTOFF24 13
++#define R_MN10300_GOTOFF16 14
++#define R_MN10300_PLT32 15
++#define R_MN10300_PLT16 16
++#define R_MN10300_GOT32 17
++#define R_MN10300_GOT24 18
++#define R_MN10300_GOT16 19
++#define R_MN10300_COPY 20
++#define R_MN10300_GLOB_DAT 21
++#define R_MN10300_JMP_SLOT 22
++#define R_MN10300_RELATIVE 23
++
++#define R_MN10300_NUM 24
++
++
++
++#define R_M32R_NONE 0
++#define R_M32R_16 1
++#define R_M32R_32 2
++#define R_M32R_24 3
++#define R_M32R_10_PCREL 4
++#define R_M32R_18_PCREL 5
++#define R_M32R_26_PCREL 6
++#define R_M32R_HI16_ULO 7
++#define R_M32R_HI16_SLO 8
++#define R_M32R_LO16 9
++#define R_M32R_SDA16 10
++#define R_M32R_GNU_VTINHERIT 11
++#define R_M32R_GNU_VTENTRY 12
++
++#define R_M32R_16_RELA 33
++#define R_M32R_32_RELA 34
++#define R_M32R_24_RELA 35
++#define R_M32R_10_PCREL_RELA 36
++#define R_M32R_18_PCREL_RELA 37
++#define R_M32R_26_PCREL_RELA 38
++#define R_M32R_HI16_ULO_RELA 39
++#define R_M32R_HI16_SLO_RELA 40
++#define R_M32R_LO16_RELA 41
++#define R_M32R_SDA16_RELA 42
++#define R_M32R_RELA_GNU_VTINHERIT 43
++#define R_M32R_RELA_GNU_VTENTRY 44
++#define R_M32R_REL32 45
++
++#define R_M32R_GOT24 48
++#define R_M32R_26_PLTREL 49
++#define R_M32R_COPY 50
++#define R_M32R_GLOB_DAT 51
++#define R_M32R_JMP_SLOT 52
++#define R_M32R_RELATIVE 53
++#define R_M32R_GOTOFF 54
++#define R_M32R_GOTPC24 55
++#define R_M32R_GOT16_HI_ULO 56
++
++#define R_M32R_GOT16_HI_SLO 57
++
++#define R_M32R_GOT16_LO 58
++#define R_M32R_GOTPC_HI_ULO 59
++
++#define R_M32R_GOTPC_HI_SLO 60
++
++#define R_M32R_GOTPC_LO 61
++
++#define R_M32R_GOTOFF_HI_ULO 62
++
++#define R_M32R_GOTOFF_HI_SLO 63
++
++#define R_M32R_GOTOFF_LO 64
++#define R_M32R_NUM 256
++
++#define R_MICROBLAZE_NONE 0
++#define R_MICROBLAZE_32 1
++#define R_MICROBLAZE_32_PCREL 2
++#define R_MICROBLAZE_64_PCREL 3
++#define R_MICROBLAZE_32_PCREL_LO 4
++#define R_MICROBLAZE_64 5
++#define R_MICROBLAZE_32_LO 6
++#define R_MICROBLAZE_SRO32 7
++#define R_MICROBLAZE_SRW32 8
++#define R_MICROBLAZE_64_NONE 9
++#define R_MICROBLAZE_32_SYM_OP_SYM 10
++#define R_MICROBLAZE_GNU_VTINHERIT 11
++#define R_MICROBLAZE_GNU_VTENTRY 12
++#define R_MICROBLAZE_GOTPC_64 13
++#define R_MICROBLAZE_GOT_64 14
++#define R_MICROBLAZE_PLT_64 15
++#define R_MICROBLAZE_REL 16
++#define R_MICROBLAZE_JUMP_SLOT 17
++#define R_MICROBLAZE_GLOB_DAT 18
++#define R_MICROBLAZE_GOTOFF_64 19
++#define R_MICROBLAZE_GOTOFF_32 20
++#define R_MICROBLAZE_COPY 21
++#define R_MICROBLAZE_TLS 22
++#define R_MICROBLAZE_TLSGD 23
++#define R_MICROBLAZE_TLSLD 24
++#define R_MICROBLAZE_TLSDTPMOD32 25
++#define R_MICROBLAZE_TLSDTPREL32 26
++#define R_MICROBLAZE_TLSDTPREL64 27
++#define R_MICROBLAZE_TLSGOTTPREL32 28
++#define R_MICROBLAZE_TLSTPREL32 29
++
++#ifdef __cplusplus
++}
++#endif
++
++
++#endif
diff --git a/target/linux/patches/3.18.9/sgidefs.patch b/target/linux/patches/3.18.9/sgidefs.patch
new file mode 100644
index 000000000..f00a284d9
--- /dev/null
+++ b/target/linux/patches/3.18.9/sgidefs.patch
@@ -0,0 +1,18 @@
+diff -Nur linux-3.11.5.orig/arch/mips/include/uapi/asm/sgidefs.h linux-3.11.5/arch/mips/include/uapi/asm/sgidefs.h
+--- linux-3.11.5.orig/arch/mips/include/uapi/asm/sgidefs.h 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/arch/mips/include/uapi/asm/sgidefs.h 2013-11-08 22:01:28.000000000 +0100
+@@ -11,14 +11,6 @@
+ #define __ASM_SGIDEFS_H
+
+ /*
+- * Using a Linux compiler for building Linux seems logic but not to
+- * everybody.
+- */
+-#ifndef __linux__
+-#error Use a Linux compiler or give up.
+-#endif
+-
+-/*
+ * Definitions for the ISA levels
+ *
+ * With the introduction of MIPS32 / MIPS64 instruction sets definitions
diff --git a/target/linux/patches/3.18.9/sortext.patch b/target/linux/patches/3.18.9/sortext.patch
new file mode 100644
index 000000000..8fd4e1d6b
--- /dev/null
+++ b/target/linux/patches/3.18.9/sortext.patch
@@ -0,0 +1,33 @@
+diff -Nur linux-3.12.6.orig/arch/arm/Kconfig linux-3.12.6/arch/arm/Kconfig
+--- linux-3.12.6.orig/arch/arm/Kconfig 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/arch/arm/Kconfig 2013-12-28 19:29:33.000000000 +0100
+@@ -6,7 +6,6 @@
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_WANT_IPC_PARSE_VERSION
+- select BUILDTIME_EXTABLE_SORT if MMU
+ select CLONE_BACKWARDS
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
+diff -Nur linux-3.12.6.orig/arch/mips/Kconfig linux-3.12.6/arch/mips/Kconfig
+--- linux-3.12.6.orig/arch/mips/Kconfig 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/arch/mips/Kconfig 2013-12-28 19:30:06.000000000 +0100
+@@ -35,7 +35,6 @@
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
+ select GENERIC_SMP_IDLE_THREAD
+- select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE
+ select HAVE_MOD_ARCH_SPECIFIC
+diff -Nur linux-3.12.6.orig/arch/x86/Kconfig linux-3.12.6/arch/x86/Kconfig
+--- linux-3.12.6.orig/arch/x86/Kconfig 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/arch/x86/Kconfig 2013-12-28 19:29:50.000000000 +0100
+@@ -100,7 +100,6 @@
+ select GENERIC_SMP_IDLE_THREAD
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select HAVE_ARCH_SECCOMP_FILTER
+- select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CMOS_UPDATE
+ select HAVE_ARCH_SOFT_DIRTY
+ select CLOCKSOURCE_WATCHDOG
diff --git a/target/linux/patches/3.18.9/startup.patch b/target/linux/patches/3.18.9/startup.patch
new file mode 100644
index 000000000..d396b75e4
--- /dev/null
+++ b/target/linux/patches/3.18.9/startup.patch
@@ -0,0 +1,37 @@
+diff -Nur linux-3.13.3.orig/init/main.c linux-3.13.3/init/main.c
+--- linux-3.13.3.orig/init/main.c 2014-02-13 23:00:14.000000000 +0100
++++ linux-3.13.3/init/main.c 2014-02-17 11:35:14.000000000 +0100
+@@ -916,6 +917,8 @@
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ pr_err("Warning: unable to open an initial console.\n");
+
++ printk(KERN_WARNING "Starting Linux (built with OpenADK).\n");
++
+ (void) sys_dup(0);
+ (void) sys_dup(0);
+ /*
+diff -Nur linux-3.13.6.orig/init/initramfs.c linux-3.13.6/init/initramfs.c
+--- linux-3.13.6.orig/init/initramfs.c 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/init/initramfs.c 2014-03-15 12:11:31.882731916 +0100
+@@ -622,6 +622,9 @@
+ */
+ load_default_modules();
+ }
++#ifdef CONFIG_DEVTMPFS_MOUNT
++ devtmpfs_mount("dev");
++#endif
+ return 0;
+ }
+ rootfs_initcall(populate_rootfs);
+diff -Nur linux-3.13.6.orig/init/main.c linux-3.13.6/init/main.c
+--- linux-3.13.6.orig/init/main.c 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/init/main.c 2014-03-15 12:13:16.459024452 +0100
+@@ -924,7 +924,7 @@
+ */
+
+ if (!ramdisk_execute_command)
+- ramdisk_execute_command = "/init";
++ ramdisk_execute_command = "/sbin/init";
+
+ if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
diff --git a/target/linux/patches/3.18.9/wlan-cf.patch b/target/linux/patches/3.18.9/wlan-cf.patch
new file mode 100644
index 000000000..fc20759e2
--- /dev/null
+++ b/target/linux/patches/3.18.9/wlan-cf.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-2.6.39.orig/drivers/net/wireless/hostap/hostap_cs.c linux-2.6.39/drivers/net/wireless/hostap/hostap_cs.c
+--- linux-2.6.39.orig/drivers/net/wireless/hostap/hostap_cs.c 2011-05-19 06:06:34.000000000 +0200
++++ linux-2.6.39/drivers/net/wireless/hostap/hostap_cs.c 2011-09-12 02:46:26.987984145 +0200
+@@ -623,6 +623,7 @@
+ static struct pcmcia_device_id hostap_cs_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
++ PCMCIA_DEVICE_MANF_CARD(0x0004, 0x2003),
+ PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
+ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
+ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
diff --git a/target/linux/patches/3.18.9/xargs.patch b/target/linux/patches/3.18.9/xargs.patch
new file mode 100644
index 000000000..2c7b3df59
--- /dev/null
+++ b/target/linux/patches/3.18.9/xargs.patch
@@ -0,0 +1,12 @@
+diff -Nur linux-3.12.6.orig/scripts/Makefile.modpost linux-3.12.6/scripts/Makefile.modpost
+--- linux-3.12.6.orig/scripts/Makefile.modpost 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/scripts/Makefile.modpost 2014-01-25 14:55:33.000000000 +0100
+@@ -60,7 +60,7 @@
+ modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
+
+ # Step 1), find all modules listed in $(MODVERDIR)/
+-MODLISTCMD := find $(MODVERDIR) -name '*.mod' | xargs -r grep -h '\.ko$$' | sort -u
++MODLISTCMD := find $(MODVERDIR) -name '*.mod' | xargs grep -h '\.ko$$' | sort -u
+ __modules := $(shell $(MODLISTCMD))
+ modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
+