path: root/target/linux/patches/3.14.43
Diffstat (limited to 'target/linux/patches/3.14.43')
-rw-r--r--  target/linux/patches/3.14.43/bsd-compatibility.patch     | 2538
-rw-r--r--  target/linux/patches/3.14.43/cleankernel.patch            | 11
-rw-r--r--  target/linux/patches/3.14.43/defaults.patch               | 34
-rw-r--r--  target/linux/patches/3.14.43/disable-netfilter.patch      | 160
-rw-r--r--  target/linux/patches/3.14.43/export-symbol-for-exmap.patch | 11
-rw-r--r--  target/linux/patches/3.14.43/fblogo.patch                 | 2097
-rw-r--r--  target/linux/patches/3.14.43/gemalto.patch                | 11
-rw-r--r--  target/linux/patches/3.14.43/initramfs-nosizelimit.patch  | 57
-rw-r--r--  target/linux/patches/3.14.43/lemote-rfkill.patch          | 21
-rw-r--r--  target/linux/patches/3.14.43/microblaze-axi.patch         | 11
-rw-r--r--  target/linux/patches/3.14.43/microblaze-ethernet.patch    | 11
-rw-r--r--  target/linux/patches/3.14.43/mkpiggy.patch                | 28
-rw-r--r--  target/linux/patches/3.14.43/mptcp.patch                  | 17203
-rw-r--r--  target/linux/patches/3.14.43/mtd-rootfs.patch             | 26
-rw-r--r--  target/linux/patches/3.14.43/nfsv3-tcp.patch              | 12
-rw-r--r--  target/linux/patches/3.14.43/non-static.patch             | 33
-rw-r--r--  target/linux/patches/3.14.43/ppc64-missing-zlib.patch     | 11
-rw-r--r--  target/linux/patches/3.14.43/regmap-boolean.patch         | 24
-rw-r--r--  target/linux/patches/3.14.43/relocs.patch                 | 2709
-rw-r--r--  target/linux/patches/3.14.43/sgidefs.patch                | 18
-rw-r--r--  target/linux/patches/3.14.43/sortext.patch                | 33
-rw-r--r--  target/linux/patches/3.14.43/startup.patch                | 37
-rw-r--r--  target/linux/patches/3.14.43/wlan-cf.patch                | 11
-rw-r--r--  target/linux/patches/3.14.43/xargs.patch                  | 12
-rw-r--r--  target/linux/patches/3.14.43/yaffs2.patch                 | 16547
-rw-r--r--  target/linux/patches/3.14.43/zlib-inflate.patch           | 12
26 files changed, 41678 insertions, 0 deletions
diff --git a/target/linux/patches/3.14.43/bsd-compatibility.patch b/target/linux/patches/3.14.43/bsd-compatibility.patch
new file mode 100644
index 000000000..b954b658f
--- /dev/null
+++ b/target/linux/patches/3.14.43/bsd-compatibility.patch
@@ -0,0 +1,2538 @@
+diff -Nur linux-3.11.5.orig/scripts/Makefile.lib linux-3.11.5/scripts/Makefile.lib
+--- linux-3.11.5.orig/scripts/Makefile.lib 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/Makefile.lib 2013-10-16 18:09:31.000000000 +0200
+@@ -281,7 +281,12 @@
+ size_append = printf $(shell \
+ dec_size=0; \
+ for F in $1; do \
+- fsize=$$(stat -c "%s" $$F); \
++ if stat -qs .>/dev/null 2>&1; then \
++ statcmd='stat -f %z'; \
++ else \
++ statcmd='stat -c %s'; \
++ fi; \
++ fsize=$$($$statcmd $$F); \
+ dec_size=$$(expr $$dec_size + $$fsize); \
+ done; \
+ printf "%08x\n" $$dec_size | \
+diff -Nur linux-3.11.5.orig/scripts/mod/mk_elfconfig.c linux-3.11.5/scripts/mod/mk_elfconfig.c
+--- linux-3.11.5.orig/scripts/mod/mk_elfconfig.c 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/mod/mk_elfconfig.c 2013-10-16 18:09:31.000000000 +0200
+@@ -1,7 +1,18 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+-#include <elf.h>
++
++#define EI_NIDENT (16)
++#define ELFMAG "\177ELF"
++
++#define SELFMAG 4
++#define EI_CLASS 4
++#define ELFCLASS32 1 /* 32-bit objects */
++#define ELFCLASS64 2 /* 64-bit objects */
++
++#define EI_DATA 5 /* Data encoding byte index */
++#define ELFDATA2LSB 1 /* 2's complement, little endian */
++#define ELFDATA2MSB 2 /* 2's complement, big endian */
+
+ int
+ main(int argc, char **argv)
+diff -Nur linux-3.11.5.orig/scripts/mod/modpost.h linux-3.11.5/scripts/mod/modpost.h
+--- linux-3.11.5.orig/scripts/mod/modpost.h 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/mod/modpost.h 2013-10-16 18:09:31.000000000 +0200
+@@ -7,7 +7,2453 @@
+ #include <sys/mman.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+-#include <elf.h>
++
++
++/* This file defines standard ELF types, structures, and macros.
++ Copyright (C) 1995-1999,2000,2001,2002,2003 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, write to the Free
++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307 USA. */
++
++#ifndef _ELF_H
++#define _ELF_H 1
++
++__BEGIN_DECLS
++
++/* Standard ELF types. */
++
++#include <stdint.h>
++
++/* Type for a 16-bit quantity. */
++typedef uint16_t Elf32_Half;
++typedef uint16_t Elf64_Half;
++
++/* Types for signed and unsigned 32-bit quantities. */
++typedef uint32_t Elf32_Word;
++typedef int32_t Elf32_Sword;
++typedef uint32_t Elf64_Word;
++typedef int32_t Elf64_Sword;
++
++/* Types for signed and unsigned 64-bit quantities. */
++typedef uint64_t Elf32_Xword;
++typedef int64_t Elf32_Sxword;
++typedef uint64_t Elf64_Xword;
++typedef int64_t Elf64_Sxword;
++
++/* Type of addresses. */
++typedef uint32_t Elf32_Addr;
++typedef uint64_t Elf64_Addr;
++
++/* Type of file offsets. */
++typedef uint32_t Elf32_Off;
++typedef uint64_t Elf64_Off;
++
++/* Type for section indices, which are 16-bit quantities. */
++typedef uint16_t Elf32_Section;
++typedef uint16_t Elf64_Section;
++
++/* Type for version symbol information. */
++typedef Elf32_Half Elf32_Versym;
++typedef Elf64_Half Elf64_Versym;
++
++
++/* The ELF file header. This appears at the start of every ELF file. */
++
++#define EI_NIDENT (16)
++
++typedef struct
++{
++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
++ Elf32_Half e_type; /* Object file type */
++ Elf32_Half e_machine; /* Architecture */
++ Elf32_Word e_version; /* Object file version */
++ Elf32_Addr e_entry; /* Entry point virtual address */
++ Elf32_Off e_phoff; /* Program header table file offset */
++ Elf32_Off e_shoff; /* Section header table file offset */
++ Elf32_Word e_flags; /* Processor-specific flags */
++ Elf32_Half e_ehsize; /* ELF header size in bytes */
++ Elf32_Half e_phentsize; /* Program header table entry size */
++ Elf32_Half e_phnum; /* Program header table entry count */
++ Elf32_Half e_shentsize; /* Section header table entry size */
++ Elf32_Half e_shnum; /* Section header table entry count */
++ Elf32_Half e_shstrndx; /* Section header string table index */
++} Elf32_Ehdr;
++
++typedef struct
++{
++ unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
++ Elf64_Half e_type; /* Object file type */
++ Elf64_Half e_machine; /* Architecture */
++ Elf64_Word e_version; /* Object file version */
++ Elf64_Addr e_entry; /* Entry point virtual address */
++ Elf64_Off e_phoff; /* Program header table file offset */
++ Elf64_Off e_shoff; /* Section header table file offset */
++ Elf64_Word e_flags; /* Processor-specific flags */
++ Elf64_Half e_ehsize; /* ELF header size in bytes */
++ Elf64_Half e_phentsize; /* Program header table entry size */
++ Elf64_Half e_phnum; /* Program header table entry count */
++ Elf64_Half e_shentsize; /* Section header table entry size */
++ Elf64_Half e_shnum; /* Section header table entry count */
++ Elf64_Half e_shstrndx; /* Section header string table index */
++} Elf64_Ehdr;
++
++/* Fields in the e_ident array. The EI_* macros are indices into the
++ array. The macros under each EI_* macro are the values the byte
++ may have. */
++
++#define EI_MAG0 0 /* File identification byte 0 index */
++#define ELFMAG0 0x7f /* Magic number byte 0 */
++
++#define EI_MAG1 1 /* File identification byte 1 index */
++#define ELFMAG1 'E' /* Magic number byte 1 */
++
++#define EI_MAG2 2 /* File identification byte 2 index */
++#define ELFMAG2 'L' /* Magic number byte 2 */
++
++#define EI_MAG3 3 /* File identification byte 3 index */
++#define ELFMAG3 'F' /* Magic number byte 3 */
++
++/* Conglomeration of the identification bytes, for easy testing as a word. */
++#define ELFMAG "\177ELF"
++#define SELFMAG 4
++
++#define EI_CLASS 4 /* File class byte index */
++#define ELFCLASSNONE 0 /* Invalid class */
++#define ELFCLASS32 1 /* 32-bit objects */
++#define ELFCLASS64 2 /* 64-bit objects */
++#define ELFCLASSNUM 3
++
++#define EI_DATA 5 /* Data encoding byte index */
++#define ELFDATANONE 0 /* Invalid data encoding */
++#define ELFDATA2LSB 1 /* 2's complement, little endian */
++#define ELFDATA2MSB 2 /* 2's complement, big endian */
++#define ELFDATANUM 3
++
++#define EI_VERSION 6 /* File version byte index */
++ /* Value must be EV_CURRENT */
++
++#define EI_OSABI 7 /* OS ABI identification */
++#define ELFOSABI_NONE 0 /* UNIX System V ABI */
++#define ELFOSABI_SYSV 0 /* Alias. */
++#define ELFOSABI_HPUX 1 /* HP-UX */
++#define ELFOSABI_NETBSD 2 /* NetBSD. */
++#define ELFOSABI_LINUX 3 /* Linux. */
++#define ELFOSABI_SOLARIS 6 /* Sun Solaris. */
++#define ELFOSABI_AIX 7 /* IBM AIX. */
++#define ELFOSABI_IRIX 8 /* SGI Irix. */
++#define ELFOSABI_FREEBSD 9 /* FreeBSD. */
++#define ELFOSABI_TRU64 10 /* Compaq TRU64 UNIX. */
++#define ELFOSABI_MODESTO 11 /* Novell Modesto. */
++#define ELFOSABI_OPENBSD 12 /* OpenBSD. */
++#define ELFOSABI_ARM 97 /* ARM */
++#define ELFOSABI_STANDALONE 255 /* Standalone (embedded) application */
++
++#define EI_ABIVERSION 8 /* ABI version */
++
++#define EI_PAD 9 /* Byte index of padding bytes */
++
++/* Legal values for e_type (object file type). */
++
++#define ET_NONE 0 /* No file type */
++#define ET_REL 1 /* Relocatable file */
++#define ET_EXEC 2 /* Executable file */
++#define ET_DYN 3 /* Shared object file */
++#define ET_CORE 4 /* Core file */
++#define ET_NUM 5 /* Number of defined types */
++#define ET_LOOS 0xfe00 /* OS-specific range start */
++#define ET_HIOS 0xfeff /* OS-specific range end */
++#define ET_LOPROC 0xff00 /* Processor-specific range start */
++#define ET_HIPROC 0xffff /* Processor-specific range end */
++
++/* Legal values for e_machine (architecture). */
++
++#define EM_NONE 0 /* No machine */
++#define EM_M32 1 /* AT&T WE 32100 */
++#define EM_SPARC 2 /* SUN SPARC */
++#define EM_386 3 /* Intel 80386 */
++#define EM_68K 4 /* Motorola m68k family */
++#define EM_88K 5 /* Motorola m88k family */
++#define EM_860 7 /* Intel 80860 */
++#define EM_MIPS 8 /* MIPS R3000 big-endian */
++#define EM_S370 9 /* IBM System/370 */
++#define EM_MIPS_RS3_LE 10 /* MIPS R3000 little-endian */
++
++#define EM_PARISC 15 /* HPPA */
++#define EM_VPP500 17 /* Fujitsu VPP500 */
++#define EM_SPARC32PLUS 18 /* Sun's "v8plus" */
++#define EM_960 19 /* Intel 80960 */
++#define EM_PPC 20 /* PowerPC */
++#define EM_PPC64 21 /* PowerPC 64-bit */
++#define EM_S390 22 /* IBM S390 */
++
++#define EM_V800 36 /* NEC V800 series */
++#define EM_FR20 37 /* Fujitsu FR20 */
++#define EM_RH32 38 /* TRW RH-32 */
++#define EM_RCE 39 /* Motorola RCE */
++#define EM_ARM 40 /* ARM */
++#define EM_FAKE_ALPHA 41 /* Digital Alpha */
++#define EM_SH 42 /* Hitachi SH */
++#define EM_SPARCV9 43 /* SPARC v9 64-bit */
++#define EM_TRICORE 44 /* Siemens Tricore */
++#define EM_ARC 45 /* Argonaut RISC Core */
++#define EM_H8_300 46 /* Hitachi H8/300 */
++#define EM_H8_300H 47 /* Hitachi H8/300H */
++#define EM_H8S 48 /* Hitachi H8S */
++#define EM_H8_500 49 /* Hitachi H8/500 */
++#define EM_IA_64 50 /* Intel Merced */
++#define EM_MIPS_X 51 /* Stanford MIPS-X */
++#define EM_COLDFIRE 52 /* Motorola Coldfire */
++#define EM_68HC12 53 /* Motorola M68HC12 */
++#define EM_MMA 54 /* Fujitsu MMA Multimedia Accelerator*/
++#define EM_PCP 55 /* Siemens PCP */
++#define EM_NCPU 56 /* Sony nCPU embeeded RISC */
++#define EM_NDR1 57 /* Denso NDR1 microprocessor */
++#define EM_STARCORE 58 /* Motorola Start*Core processor */
++#define EM_ME16 59 /* Toyota ME16 processor */
++#define EM_ST100 60 /* STMicroelectronic ST100 processor */
++#define EM_TINYJ 61 /* Advanced Logic Corp. Tinyj emb.fam*/
++#define EM_X86_64 62 /* AMD x86-64 architecture */
++#define EM_PDSP 63 /* Sony DSP Processor */
++
++#define EM_FX66 66 /* Siemens FX66 microcontroller */
++#define EM_ST9PLUS 67 /* STMicroelectronics ST9+ 8/16 mc */
++#define EM_ST7 68 /* STmicroelectronics ST7 8 bit mc */
++#define EM_68HC16 69 /* Motorola MC68HC16 microcontroller */
++#define EM_68HC11 70 /* Motorola MC68HC11 microcontroller */
++#define EM_68HC08 71 /* Motorola MC68HC08 microcontroller */
++#define EM_68HC05 72 /* Motorola MC68HC05 microcontroller */
++#define EM_SVX 73 /* Silicon Graphics SVx */
++#define EM_ST19 74 /* STMicroelectronics ST19 8 bit mc */
++#define EM_VAX 75 /* Digital VAX */
++#define EM_CRIS 76 /* Axis Communications 32-bit embedded processor */
++#define EM_JAVELIN 77 /* Infineon Technologies 32-bit embedded processor */
++#define EM_FIREPATH 78 /* Element 14 64-bit DSP Processor */
++#define EM_ZSP 79 /* LSI Logic 16-bit DSP Processor */
++#define EM_MMIX 80 /* Donald Knuth's educational 64-bit processor */
++#define EM_HUANY 81 /* Harvard University machine-independent object files */
++#define EM_PRISM 82 /* SiTera Prism */
++#define EM_AVR 83 /* Atmel AVR 8-bit microcontroller */
++#define EM_FR30 84 /* Fujitsu FR30 */
++#define EM_D10V 85 /* Mitsubishi D10V */
++#define EM_D30V 86 /* Mitsubishi D30V */
++#define EM_V850 87 /* NEC v850 */
++#define EM_M32R 88 /* Mitsubishi M32R */
++#define EM_MN10300 89 /* Matsushita MN10300 */
++#define EM_MN10200 90 /* Matsushita MN10200 */
++#define EM_PJ 91 /* picoJava */
++#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
++#define EM_ARC_A5 93 /* ARC Cores Tangent-A5 */
++#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */
++#define EM_NUM 95
++
++/* If it is necessary to assign new unofficial EM_* values, please
++ pick large random numbers (0x8523, 0xa7f2, etc.) to minimize the
++ chances of collision with official or non-GNU unofficial values. */
++
++#define EM_ALPHA 0x9026
++
++/* Legal values for e_version (version). */
++
++#define EV_NONE 0 /* Invalid ELF version */
++#define EV_CURRENT 1 /* Current version */
++#define EV_NUM 2
++
++/* Section header. */
++
++typedef struct
++{
++ Elf32_Word sh_name; /* Section name (string tbl index) */
++ Elf32_Word sh_type; /* Section type */
++ Elf32_Word sh_flags; /* Section flags */
++ Elf32_Addr sh_addr; /* Section virtual addr at execution */
++ Elf32_Off sh_offset; /* Section file offset */
++ Elf32_Word sh_size; /* Section size in bytes */
++ Elf32_Word sh_link; /* Link to another section */
++ Elf32_Word sh_info; /* Additional section information */
++ Elf32_Word sh_addralign; /* Section alignment */
++ Elf32_Word sh_entsize; /* Entry size if section holds table */
++} Elf32_Shdr;
++
++typedef struct
++{
++ Elf64_Word sh_name; /* Section name (string tbl index) */
++ Elf64_Word sh_type; /* Section type */
++ Elf64_Xword sh_flags; /* Section flags */
++ Elf64_Addr sh_addr; /* Section virtual addr at execution */
++ Elf64_Off sh_offset; /* Section file offset */
++ Elf64_Xword sh_size; /* Section size in bytes */
++ Elf64_Word sh_link; /* Link to another section */
++ Elf64_Word sh_info; /* Additional section information */
++ Elf64_Xword sh_addralign; /* Section alignment */
++ Elf64_Xword sh_entsize; /* Entry size if section holds table */
++} Elf64_Shdr;
++
++/* Special section indices. */
++
++#define SHN_UNDEF 0 /* Undefined section */
++#define SHN_LORESERVE 0xff00 /* Start of reserved indices */
++#define SHN_LOPROC 0xff00 /* Start of processor-specific */
++#define SHN_HIPROC 0xff1f /* End of processor-specific */
++#define SHN_LOOS 0xff20 /* Start of OS-specific */
++#define SHN_HIOS 0xff3f /* End of OS-specific */
++#define SHN_ABS 0xfff1 /* Associated symbol is absolute */
++#define SHN_COMMON 0xfff2 /* Associated symbol is common */
++#define SHN_XINDEX 0xffff /* Index is in extra table. */
++#define SHN_HIRESERVE 0xffff /* End of reserved indices */
++
++/* Legal values for sh_type (section type). */
++
++#define SHT_NULL 0 /* Section header table entry unused */
++#define SHT_PROGBITS 1 /* Program data */
++#define SHT_SYMTAB 2 /* Symbol table */
++#define SHT_STRTAB 3 /* String table */
++#define SHT_RELA 4 /* Relocation entries with addends */
++#define SHT_HASH 5 /* Symbol hash table */
++#define SHT_DYNAMIC 6 /* Dynamic linking information */
++#define SHT_NOTE 7 /* Notes */
++#define SHT_NOBITS 8 /* Program space with no data (bss) */
++#define SHT_REL 9 /* Relocation entries, no addends */
++#define SHT_SHLIB 10 /* Reserved */
++#define SHT_DYNSYM 11 /* Dynamic linker symbol table */
++#define SHT_INIT_ARRAY 14 /* Array of constructors */
++#define SHT_FINI_ARRAY 15 /* Array of destructors */
++#define SHT_PREINIT_ARRAY 16 /* Array of pre-constructors */
++#define SHT_GROUP 17 /* Section group */
++#define SHT_SYMTAB_SHNDX 18 /* Extended section indeces */
++#define SHT_NUM 19 /* Number of defined types. */
++#define SHT_LOOS 0x60000000 /* Start OS-specific */
++#define SHT_GNU_LIBLIST 0x6ffffff7 /* Prelink library list */
++#define SHT_CHECKSUM 0x6ffffff8 /* Checksum for DSO content. */
++#define SHT_LOSUNW 0x6ffffffa /* Sun-specific low bound. */
++#define SHT_SUNW_move 0x6ffffffa
++#define SHT_SUNW_COMDAT 0x6ffffffb
++#define SHT_SUNW_syminfo 0x6ffffffc
++#define SHT_GNU_verdef 0x6ffffffd /* Version definition section. */
++#define SHT_GNU_verneed 0x6ffffffe /* Version needs section. */
++#define SHT_GNU_versym 0x6fffffff /* Version symbol table. */
++#define SHT_HISUNW 0x6fffffff /* Sun-specific high bound. */
++#define SHT_HIOS 0x6fffffff /* End OS-specific type */
++#define SHT_LOPROC 0x70000000 /* Start of processor-specific */
++#define SHT_HIPROC 0x7fffffff /* End of processor-specific */
++#define SHT_LOUSER 0x80000000 /* Start of application-specific */
++#define SHT_HIUSER 0x8fffffff /* End of application-specific */
++
++/* Legal values for sh_flags (section flags). */
++
++#define SHF_WRITE (1 << 0) /* Writable */
++#define SHF_ALLOC (1 << 1) /* Occupies memory during execution */
++#define SHF_EXECINSTR (1 << 2) /* Executable */
++#define SHF_MERGE (1 << 4) /* Might be merged */
++#define SHF_STRINGS (1 << 5) /* Contains nul-terminated strings */
++#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */
++#define SHF_LINK_ORDER (1 << 7) /* Preserve order after combining */
++#define SHF_OS_NONCONFORMING (1 << 8) /* Non-standard OS specific handling
++ required */
++#define SHF_GROUP (1 << 9) /* Section is member of a group. */
++#define SHF_TLS (1 << 10) /* Section hold thread-local data. */
++#define SHF_MASKOS 0x0ff00000 /* OS-specific. */
++#define SHF_MASKPROC 0xf0000000 /* Processor-specific */
++
++/* Section group handling. */
++#define GRP_COMDAT 0x1 /* Mark group as COMDAT. */
++
++/* Symbol table entry. */
++
++typedef struct
++{
++ Elf32_Word st_name; /* Symbol name (string tbl index) */
++ Elf32_Addr st_value; /* Symbol value */
++ Elf32_Word st_size; /* Symbol size */
++ unsigned char st_info; /* Symbol type and binding */
++ unsigned char st_other; /* Symbol visibility */
++ Elf32_Section st_shndx; /* Section index */
++} Elf32_Sym;
++
++typedef struct
++{
++ Elf64_Word st_name; /* Symbol name (string tbl index) */
++ unsigned char st_info; /* Symbol type and binding */
++ unsigned char st_other; /* Symbol visibility */
++ Elf64_Section st_shndx; /* Section index */
++ Elf64_Addr st_value; /* Symbol value */
++ Elf64_Xword st_size; /* Symbol size */
++} Elf64_Sym;
++
++/* The syminfo section if available contains additional information about
++ every dynamic symbol. */
++
++typedef struct
++{
++ Elf32_Half si_boundto; /* Direct bindings, symbol bound to */
++ Elf32_Half si_flags; /* Per symbol flags */
++} Elf32_Syminfo;
++
++typedef struct
++{
++ Elf64_Half si_boundto; /* Direct bindings, symbol bound to */
++ Elf64_Half si_flags; /* Per symbol flags */
++} Elf64_Syminfo;
++
++/* Possible values for si_boundto. */
++#define SYMINFO_BT_SELF 0xffff /* Symbol bound to self */
++#define SYMINFO_BT_PARENT 0xfffe /* Symbol bound to parent */
++#define SYMINFO_BT_LOWRESERVE 0xff00 /* Beginning of reserved entries */
++
++/* Possible bitmasks for si_flags. */
++#define SYMINFO_FLG_DIRECT 0x0001 /* Direct bound symbol */
++#define SYMINFO_FLG_PASSTHRU 0x0002 /* Pass-thru symbol for translator */
++#define SYMINFO_FLG_COPY 0x0004 /* Symbol is a copy-reloc */
++#define SYMINFO_FLG_LAZYLOAD 0x0008 /* Symbol bound to object to be lazy
++ loaded */
++/* Syminfo version values. */
++#define SYMINFO_NONE 0
++#define SYMINFO_CURRENT 1
++#define SYMINFO_NUM 2
++
++
++/* How to extract and insert information held in the st_info field. */
++
++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4)
++#define ELF32_ST_TYPE(val) ((val) & 0xf)
++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
++
++/* Both Elf32_Sym and Elf64_Sym use the same one-byte st_info field. */
++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val)
++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val)
++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type))
++
++/* Legal values for ST_BIND subfield of st_info (symbol binding). */
++
++#define STB_LOCAL 0 /* Local symbol */
++#define STB_GLOBAL 1 /* Global symbol */
++#define STB_WEAK 2 /* Weak symbol */
++#define STB_NUM 3 /* Number of defined types. */
++#define STB_LOOS 10 /* Start of OS-specific */
++#define STB_HIOS 12 /* End of OS-specific */
++#define STB_LOPROC 13 /* Start of processor-specific */
++#define STB_HIPROC 15 /* End of processor-specific */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type). */
++
++#define STT_NOTYPE 0 /* Symbol type is unspecified */
++#define STT_OBJECT 1 /* Symbol is a data object */
++#define STT_FUNC 2 /* Symbol is a code object */
++#define STT_SECTION 3 /* Symbol associated with a section */
++#define STT_FILE 4 /* Symbol's name is file name */
++#define STT_COMMON 5 /* Symbol is a common data object */
++#define STT_TLS 6 /* Symbol is thread-local data object*/
++#define STT_NUM 7 /* Number of defined types. */
++#define STT_LOOS 10 /* Start of OS-specific */
++#define STT_HIOS 12 /* End of OS-specific */
++#define STT_LOPROC 13 /* Start of processor-specific */
++#define STT_HIPROC 15 /* End of processor-specific */
++
++
++/* Symbol table indices are found in the hash buckets and chain table
++ of a symbol hash table section. This special index value indicates
++ the end of a chain, meaning no further symbols are found in that bucket. */
++
++#define STN_UNDEF 0 /* End of a chain. */
++
++
++/* How to extract and insert information held in the st_other field. */
++
++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
++
++/* For ELF64 the definitions are the same. */
++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
++
++/* Symbol visibility specification encoded in the st_other field. */
++#define STV_DEFAULT 0 /* Default symbol visibility rules */
++#define STV_INTERNAL 1 /* Processor specific hidden class */
++#define STV_HIDDEN 2 /* Sym unavailable in other modules */
++#define STV_PROTECTED 3 /* Not preemptible, not exported */
++
++
++/* Relocation table entry without addend (in section of type SHT_REL). */
++
++typedef struct
++{
++ Elf32_Addr r_offset; /* Address */
++ Elf32_Word r_info; /* Relocation type and symbol index */
++} Elf32_Rel;
++
++/* I have seen two different definitions of the Elf64_Rel and
++ Elf64_Rela structures, so we'll leave them out until Novell (or
++ whoever) gets their act together. */
++/* The following, at least, is used on Sparc v9, MIPS, and Alpha. */
++
++typedef struct
++{
++ Elf64_Addr r_offset; /* Address */
++ Elf64_Xword r_info; /* Relocation type and symbol index */
++} Elf64_Rel;
++
++/* Relocation table entry with addend (in section of type SHT_RELA). */
++
++typedef struct
++{
++ Elf32_Addr r_offset; /* Address */
++ Elf32_Word r_info; /* Relocation type and symbol index */
++ Elf32_Sword r_addend; /* Addend */
++} Elf32_Rela;
++
++typedef struct
++{
++ Elf64_Addr r_offset; /* Address */
++ Elf64_Xword r_info; /* Relocation type and symbol index */
++ Elf64_Sxword r_addend; /* Addend */
++} Elf64_Rela;
++
++/* How to extract and insert information held in the r_info field. */
++
++#define ELF32_R_SYM(val) ((val) >> 8)
++#define ELF32_R_TYPE(val) ((val) & 0xff)
++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff))
++
++#define ELF64_R_SYM(i) ((i) >> 32)
++#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type))
++
++/* Program segment header. */
++
++typedef struct
++{
++ Elf32_Word p_type; /* Segment type */
++ Elf32_Off p_offset; /* Segment file offset */
++ Elf32_Addr p_vaddr; /* Segment virtual address */
++ Elf32_Addr p_paddr; /* Segment physical address */
++ Elf32_Word p_filesz; /* Segment size in file */
++ Elf32_Word p_memsz; /* Segment size in memory */
++ Elf32_Word p_flags; /* Segment flags */
++ Elf32_Word p_align; /* Segment alignment */
++} Elf32_Phdr;
++
++typedef struct
++{
++ Elf64_Word p_type; /* Segment type */
++ Elf64_Word p_flags; /* Segment flags */
++ Elf64_Off p_offset; /* Segment file offset */
++ Elf64_Addr p_vaddr; /* Segment virtual address */
++ Elf64_Addr p_paddr; /* Segment physical address */
++ Elf64_Xword p_filesz; /* Segment size in file */
++ Elf64_Xword p_memsz; /* Segment size in memory */
++ Elf64_Xword p_align; /* Segment alignment */
++} Elf64_Phdr;
++
++/* Legal values for p_type (segment type). */
++
++#define PT_NULL 0 /* Program header table entry unused */
++#define PT_LOAD 1 /* Loadable program segment */
++#define PT_DYNAMIC 2 /* Dynamic linking information */
++#define PT_INTERP 3 /* Program interpreter */
++#define PT_NOTE 4 /* Auxiliary information */
++#define PT_SHLIB 5 /* Reserved */
++#define PT_PHDR 6 /* Entry for header table itself */
++#define PT_TLS 7 /* Thread-local storage segment */
++#define PT_NUM 8 /* Number of defined types */
++#define PT_LOOS 0x60000000 /* Start of OS-specific */
++#define PT_GNU_EH_FRAME 0x6474e550 /* GCC .eh_frame_hdr segment */
++#define PT_GNU_STACK 0x6474e551 /* Indicates stack executability */
++#define PT_LOSUNW 0x6ffffffa
++#define PT_SUNWBSS 0x6ffffffa /* Sun Specific segment */
++#define PT_SUNWSTACK 0x6ffffffb /* Stack segment */
++#define PT_HISUNW 0x6fffffff
++#define PT_HIOS 0x6fffffff /* End of OS-specific */
++#define PT_LOPROC 0x70000000 /* Start of processor-specific */
++#define PT_HIPROC 0x7fffffff /* End of processor-specific */
++
++/* Legal values for p_flags (segment flags). */
++
++#define PF_X (1 << 0) /* Segment is executable */
++#define PF_W (1 << 1) /* Segment is writable */
++#define PF_R (1 << 2) /* Segment is readable */
++#define PF_MASKOS 0x0ff00000 /* OS-specific */
++#define PF_MASKPROC 0xf0000000 /* Processor-specific */
++
++/* Legal values for note segment descriptor types for core files. */
++
++#define NT_PRSTATUS 1 /* Contains copy of prstatus struct */
++#define NT_FPREGSET 2 /* Contains copy of fpregset struct */
++#define NT_PRPSINFO 3 /* Contains copy of prpsinfo struct */
++#define NT_PRXREG 4 /* Contains copy of prxregset struct */
++#define NT_TASKSTRUCT 4 /* Contains copy of task structure */
++#define NT_PLATFORM 5 /* String from sysinfo(SI_PLATFORM) */
++#define NT_AUXV 6 /* Contains copy of auxv array */
++#define NT_GWINDOWS 7 /* Contains copy of gwindows struct */
++#define NT_ASRS 8 /* Contains copy of asrset struct */
++#define NT_PSTATUS 10 /* Contains copy of pstatus struct */
++#define NT_PSINFO 13 /* Contains copy of psinfo struct */
++#define NT_PRCRED 14 /* Contains copy of prcred struct */
++#define NT_UTSNAME 15 /* Contains copy of utsname struct */
++#define NT_LWPSTATUS 16 /* Contains copy of lwpstatus struct */
++#define NT_LWPSINFO 17 /* Contains copy of lwpinfo struct */
++#define NT_PRFPXREG 20 /* Contains copy of fprxregset struct*/
++
++/* Legal values for the note segment descriptor types for object files. */
++
++#define NT_VERSION 1 /* Contains a version string. */
++
++
++/* Dynamic section entry. */
++
++typedef struct
++{
++ Elf32_Sword d_tag; /* Dynamic entry type */
++ union
++ {
++ Elf32_Word d_val; /* Integer value */
++ Elf32_Addr d_ptr; /* Address value */
++ } d_un;
++} Elf32_Dyn;
++
++typedef struct
++{
++ Elf64_Sxword d_tag; /* Dynamic entry type */
++ union
++ {
++ Elf64_Xword d_val; /* Integer value */
++ Elf64_Addr d_ptr; /* Address value */
++ } d_un;
++} Elf64_Dyn;
++
++/* Legal values for d_tag (dynamic entry type). */
++
++#define DT_NULL 0 /* Marks end of dynamic section */
++#define DT_NEEDED 1 /* Name of needed library */
++#define DT_PLTRELSZ 2 /* Size in bytes of PLT relocs */
++#define DT_PLTGOT 3 /* Processor defined value */
++#define DT_HASH 4 /* Address of symbol hash table */
++#define DT_STRTAB 5 /* Address of string table */
++#define DT_SYMTAB 6 /* Address of symbol table */
++#define DT_RELA 7 /* Address of Rela relocs */
++#define DT_RELASZ 8 /* Total size of Rela relocs */
++#define DT_RELAENT 9 /* Size of one Rela reloc */
++#define DT_STRSZ 10 /* Size of string table */
++#define DT_SYMENT 11 /* Size of one symbol table entry */
++#define DT_INIT 12 /* Address of init function */
++#define DT_FINI 13 /* Address of termination function */
++#define DT_SONAME 14 /* Name of shared object */
++#define DT_RPATH 15 /* Library search path (deprecated) */
++#define DT_SYMBOLIC 16 /* Start symbol search here */
++#define DT_REL 17 /* Address of Rel relocs */
++#define DT_RELSZ 18 /* Total size of Rel relocs */
++#define DT_RELENT 19 /* Size of one Rel reloc */
++#define DT_PLTREL 20 /* Type of reloc in PLT */
++#define DT_DEBUG 21 /* For debugging; unspecified */
++#define DT_TEXTREL 22 /* Reloc might modify .text */
++#define DT_JMPREL 23 /* Address of PLT relocs */
++#define DT_BIND_NOW 24 /* Process relocations of object */
++#define DT_INIT_ARRAY 25 /* Array with addresses of init fct */
++#define DT_FINI_ARRAY 26 /* Array with addresses of fini fct */
++#define DT_INIT_ARRAYSZ 27 /* Size in bytes of DT_INIT_ARRAY */
++#define DT_FINI_ARRAYSZ 28 /* Size in bytes of DT_FINI_ARRAY */
++#define DT_RUNPATH 29 /* Library search path */
++#define DT_FLAGS 30 /* Flags for the object being loaded */
++#define DT_ENCODING 32 /* Start of encoded range */
++#define DT_PREINIT_ARRAY 32 /* Array with addresses of preinit fct*/
++#define DT_PREINIT_ARRAYSZ 33 /* size in bytes of DT_PREINIT_ARRAY */
++#define DT_NUM 34 /* Number used */
++#define DT_LOOS 0x6000000d /* Start of OS-specific */
++#define DT_HIOS 0x6ffff000 /* End of OS-specific */
++#define DT_LOPROC 0x70000000 /* Start of processor-specific */
++#define DT_HIPROC 0x7fffffff /* End of processor-specific */
++#define DT_PROCNUM DT_MIPS_NUM /* Most used by any processor */
++
++/* DT_* entries which fall between DT_VALRNGHI & DT_VALRNGLO use the
++ Dyn.d_un.d_val field of the Elf*_Dyn structure. This follows Sun's
++ approach. */
++#define DT_VALRNGLO 0x6ffffd00
++#define DT_GNU_PRELINKED 0x6ffffdf5 /* Prelinking timestamp */
++#define DT_GNU_CONFLICTSZ 0x6ffffdf6 /* Size of conflict section */
++#define DT_GNU_LIBLISTSZ 0x6ffffdf7 /* Size of library list */
++#define DT_CHECKSUM 0x6ffffdf8
++#define DT_PLTPADSZ 0x6ffffdf9
++#define DT_MOVEENT 0x6ffffdfa
++#define DT_MOVESZ 0x6ffffdfb
++#define DT_FEATURE_1 0x6ffffdfc /* Feature selection (DTF_*). */
++#define DT_POSFLAG_1 0x6ffffdfd /* Flags for DT_* entries, effecting
++ the following DT_* entry. */
++#define DT_SYMINSZ 0x6ffffdfe /* Size of syminfo table (in bytes) */
++#define DT_SYMINENT 0x6ffffdff /* Entry size of syminfo */
++#define DT_VALRNGHI 0x6ffffdff
++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag)) /* Reverse order! */
++#define DT_VALNUM 12
++
++/* DT_* entries which fall between DT_ADDRRNGHI & DT_ADDRRNGLO use the
++ Dyn.d_un.d_ptr field of the Elf*_Dyn structure.
++
++ If any adjustment is made to the ELF object after it has been
++ built these entries will need to be adjusted. */
++#define DT_ADDRRNGLO 0x6ffffe00
++#define DT_GNU_CONFLICT 0x6ffffef8 /* Start of conflict section */
++#define DT_GNU_LIBLIST 0x6ffffef9 /* Library list */
++#define DT_CONFIG 0x6ffffefa /* Configuration information. */
++#define DT_DEPAUDIT 0x6ffffefb /* Dependency auditing. */
++#define DT_AUDIT 0x6ffffefc /* Object auditing. */
++#define DT_PLTPAD 0x6ffffefd /* PLT padding. */
++#define DT_MOVETAB 0x6ffffefe /* Move table. */
++#define DT_SYMINFO 0x6ffffeff /* Syminfo table. */
++#define DT_ADDRRNGHI 0x6ffffeff
++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag)) /* Reverse order! */
++#define DT_ADDRNUM 10
++
++/* The versioning entry types. The next are defined as part of the
++ GNU extension. */
++#define DT_VERSYM 0x6ffffff0
++
++#define DT_RELACOUNT 0x6ffffff9
++#define DT_RELCOUNT 0x6ffffffa
++
++/* These were chosen by Sun. */
++#define DT_FLAGS_1 0x6ffffffb /* State flags, see DF_1_* below. */
++#define DT_VERDEF 0x6ffffffc /* Address of version definition
++ table */
++#define DT_VERDEFNUM 0x6ffffffd /* Number of version definitions */
++#define DT_VERNEED 0x6ffffffe /* Address of table with needed
++ versions */
++#define DT_VERNEEDNUM 0x6fffffff /* Number of needed versions */
++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag)) /* Reverse order! */
++#define DT_VERSIONTAGNUM 16
++
++/* Sun added these machine-independent extensions in the "processor-specific"
++ range. Be compatible. */
++#define DT_AUXILIARY 0x7ffffffd /* Shared object to load before self */
++#define DT_FILTER 0x7fffffff /* Shared object to get values from */
++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1)
++#define DT_EXTRANUM 3
++
++/* Values of `d_un.d_val' in the DT_FLAGS entry. */
++#define DF_ORIGIN 0x00000001 /* Object may use DF_ORIGIN */
++#define DF_SYMBOLIC 0x00000002 /* Symbol resolutions starts here */
++#define DF_TEXTREL 0x00000004 /* Object contains text relocations */
++#define DF_BIND_NOW 0x00000008 /* No lazy binding for this object */
++#define DF_STATIC_TLS 0x00000010 /* Module uses the static TLS model */
++
++/* State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1
++ entry in the dynamic section. */
++#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */
++#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */
++#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */
++#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/
++#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/
++#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/
++#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */
++#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */
++#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */
++#define DF_1_TRANS 0x00000200
++#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */
++#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */
++#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */
++#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/
++#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */
++#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */
++#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */
++
++/* Flags for the feature selection in DT_FEATURE_1. */
++#define DTF_1_PARINIT 0x00000001
++#define DTF_1_CONFEXP 0x00000002
++
++/* Flags in the DT_POSFLAG_1 entry effecting only the next DT_* entry. */
++#define DF_P1_LAZYLOAD 0x00000001 /* Lazyload following object. */
++#define DF_P1_GROUPPERM 0x00000002 /* Symbols from next object are not
++ generally available. */
++
++/* Version definition sections. */
++
++typedef struct
++{
++ Elf32_Half vd_version; /* Version revision */
++ Elf32_Half vd_flags; /* Version information */
++ Elf32_Half vd_ndx; /* Version Index */
++ Elf32_Half vd_cnt; /* Number of associated aux entries */
++ Elf32_Word vd_hash; /* Version name hash value */
++ Elf32_Word vd_aux; /* Offset in bytes to verdaux array */
++ Elf32_Word vd_next; /* Offset in bytes to next verdef
++ entry */
++} Elf32_Verdef;
++
++typedef struct
++{
++ Elf64_Half vd_version; /* Version revision */
++ Elf64_Half vd_flags; /* Version information */
++ Elf64_Half vd_ndx; /* Version Index */
++ Elf64_Half vd_cnt; /* Number of associated aux entries */
++ Elf64_Word vd_hash; /* Version name hash value */
++ Elf64_Word vd_aux; /* Offset in bytes to verdaux array */
++ Elf64_Word vd_next; /* Offset in bytes to next verdef
++ entry */
++} Elf64_Verdef;
++
++
++/* Legal values for vd_version (version revision). */
++#define VER_DEF_NONE 0 /* No version */
++#define VER_DEF_CURRENT 1 /* Current version */
++#define VER_DEF_NUM 2 /* Given version number */
++
++/* Legal values for vd_flags (version information flags). */
++#define VER_FLG_BASE 0x1 /* Version definition of file itself */
++#define VER_FLG_WEAK 0x2 /* Weak version identifier */
++
++/* Versym symbol index values. */
++#define VER_NDX_LOCAL 0 /* Symbol is local. */
++#define VER_NDX_GLOBAL 1 /* Symbol is global. */
++#define VER_NDX_LORESERVE 0xff00 /* Beginning of reserved entries. */
++#define VER_NDX_ELIMINATE 0xff01 /* Symbol is to be eliminated. */
++
++/* Auxialiary version information. */
++
++typedef struct
++{
++ Elf32_Word vda_name; /* Version or dependency names */
++ Elf32_Word vda_next; /* Offset in bytes to next verdaux
++ entry */
++} Elf32_Verdaux;
++
++typedef struct
++{
++ Elf64_Word vda_name; /* Version or dependency names */
++ Elf64_Word vda_next; /* Offset in bytes to next verdaux
++ entry */
++} Elf64_Verdaux;
++
++
++/* Version dependency section. */
++
++typedef struct
++{
++ Elf32_Half vn_version; /* Version of structure */
++ Elf32_Half vn_cnt; /* Number of associated aux entries */
++ Elf32_Word vn_file; /* Offset of filename for this
++ dependency */
++ Elf32_Word vn_aux; /* Offset in bytes to vernaux array */
++ Elf32_Word vn_next; /* Offset in bytes to next verneed
++ entry */
++} Elf32_Verneed;
++
++typedef struct
++{
++ Elf64_Half vn_version; /* Version of structure */
++ Elf64_Half vn_cnt; /* Number of associated aux entries */
++ Elf64_Word vn_file; /* Offset of filename for this
++ dependency */
++ Elf64_Word vn_aux; /* Offset in bytes to vernaux array */
++ Elf64_Word vn_next; /* Offset in bytes to next verneed
++ entry */
++} Elf64_Verneed;
++
++
++/* Legal values for vn_version (version revision). */
++#define VER_NEED_NONE 0 /* No version */
++#define VER_NEED_CURRENT 1 /* Current version */
++#define VER_NEED_NUM 2 /* Given version number */
++
++/* Auxiliary needed version information. */
++
++typedef struct
++{
++ Elf32_Word vna_hash; /* Hash value of dependency name */
++ Elf32_Half vna_flags; /* Dependency specific information */
++ Elf32_Half vna_other; /* Unused */
++ Elf32_Word vna_name; /* Dependency name string offset */
++ Elf32_Word vna_next; /* Offset in bytes to next vernaux
++ entry */
++} Elf32_Vernaux;
++
++typedef struct
++{
++ Elf64_Word vna_hash; /* Hash value of dependency name */
++ Elf64_Half vna_flags; /* Dependency specific information */
++ Elf64_Half vna_other; /* Unused */
++ Elf64_Word vna_name; /* Dependency name string offset */
++ Elf64_Word vna_next; /* Offset in bytes to next vernaux
++ entry */
++} Elf64_Vernaux;
++
++
++/* Legal values for vna_flags. */
++#define VER_FLG_WEAK 0x2 /* Weak version identifier */
++
++
++/* Auxiliary vector. */
++
++/* This vector is normally only used by the program interpreter. The
++ usual definition in an ABI supplement uses the name auxv_t. The
++ vector is not usually defined in a standard <elf.h> file, but it
++ can't hurt. We rename it to avoid conflicts. The sizes of these
++ types are an arrangement between the exec server and the program
++ interpreter, so we don't fully specify them here. */
++
++typedef struct
++{
++ int a_type; /* Entry type */
++ union
++ {
++ long int a_val; /* Integer value */
++ void *a_ptr; /* Pointer value */
++ void (*a_fcn) (void); /* Function pointer value */
++ } a_un;
++} Elf32_auxv_t;
++
++typedef struct
++{
++ long int a_type; /* Entry type */
++ union
++ {
++ long int a_val; /* Integer value */
++ void *a_ptr; /* Pointer value */
++ void (*a_fcn) (void); /* Function pointer value */
++ } a_un;
++} Elf64_auxv_t;
++
++/* Legal values for a_type (entry type). */
++
++#define AT_NULL 0 /* End of vector */
++#define AT_IGNORE 1 /* Entry should be ignored */
++#define AT_EXECFD 2 /* File descriptor of program */
++#define AT_PHDR 3 /* Program headers for program */
++#define AT_PHENT 4 /* Size of program header entry */
++#define AT_PHNUM 5 /* Number of program headers */
++#define AT_PAGESZ 6 /* System page size */
++#define AT_BASE 7 /* Base address of interpreter */
++#define AT_FLAGS 8 /* Flags */
++#define AT_ENTRY 9 /* Entry point of program */
++#define AT_NOTELF 10 /* Program is not ELF */
++#define AT_UID 11 /* Real uid */
++#define AT_EUID 12 /* Effective uid */
++#define AT_GID 13 /* Real gid */
++#define AT_EGID 14 /* Effective gid */
++#define AT_CLKTCK 17 /* Frequency of times() */
++
++/* Some more special a_type values describing the hardware. */
++#define AT_PLATFORM 15 /* String identifying platform. */
++#define AT_HWCAP 16 /* Machine dependent hints about
++ processor capabilities. */
++
++/* This entry gives some information about the FPU initialization
++ performed by the kernel. */
++#define AT_FPUCW 18 /* Used FPU control word. */
++
++/* Cache block sizes. */
++#define AT_DCACHEBSIZE 19 /* Data cache block size. */
++#define AT_ICACHEBSIZE 20 /* Instruction cache block size. */
++#define AT_UCACHEBSIZE 21 /* Unified cache block size. */
++
++/* A special ignored value for PPC, used by the kernel to control the
++ interpretation of the AUXV. Must be > 16. */
++#define AT_IGNOREPPC 22 /* Entry should be ignored. */
++
++#define AT_SECURE 23 /* Boolean, was exec setuid-like? */
++
++/* Pointer to the global system page used for system calls and other
++ nice things. */
++#define AT_SYSINFO 32
++#define AT_SYSINFO_EHDR 33
++
++
++/* Note section contents. Each entry in the note section begins with
++ a header of a fixed form. */
++
++typedef struct
++{
++ Elf32_Word n_namesz; /* Length of the note's name. */
++ Elf32_Word n_descsz; /* Length of the note's descriptor. */
++ Elf32_Word n_type; /* Type of the note. */
++} Elf32_Nhdr;
++
++typedef struct
++{
++ Elf64_Word n_namesz; /* Length of the note's name. */
++ Elf64_Word n_descsz; /* Length of the note's descriptor. */
++ Elf64_Word n_type; /* Type of the note. */
++} Elf64_Nhdr;
++
++/* Known names of notes. */
++
++/* Solaris entries in the note section have this name. */
++#define ELF_NOTE_SOLARIS "SUNW Solaris"
++
++/* Note entries for GNU systems have this name. */
++#define ELF_NOTE_GNU "GNU"
++
++
++/* Defined types of notes for Solaris. */
++
++/* Value of descriptor (one word) is desired pagesize for the binary. */
++#define ELF_NOTE_PAGESIZE_HINT 1
++
++
++/* Defined note types for GNU systems. */
++
++/* ABI information. The descriptor consists of words:
++ word 0: OS descriptor
++ word 1: major version of the ABI
++ word 2: minor version of the ABI
++ word 3: subminor version of the ABI
++*/
++#define ELF_NOTE_ABI 1
++
++/* Known OSes. These value can appear in word 0 of an ELF_NOTE_ABI
++ note section entry. */
++#define ELF_NOTE_OS_LINUX 0
++#define ELF_NOTE_OS_GNU 1
++#define ELF_NOTE_OS_SOLARIS2 2
++#define ELF_NOTE_OS_FREEBSD 3
++
++
++/* Move records. */
++typedef struct
++{
++ Elf32_Xword m_value; /* Symbol value. */
++ Elf32_Word m_info; /* Size and index. */
++ Elf32_Word m_poffset; /* Symbol offset. */
++ Elf32_Half m_repeat; /* Repeat count. */
++ Elf32_Half m_stride; /* Stride info. */
++} Elf32_Move;
++
++typedef struct
++{
++ Elf64_Xword m_value; /* Symbol value. */
++ Elf64_Xword m_info; /* Size and index. */
++ Elf64_Xword m_poffset; /* Symbol offset. */
++ Elf64_Half m_repeat; /* Repeat count. */
++ Elf64_Half m_stride; /* Stride info. */
++} Elf64_Move;
++
++/* Macro to construct move records. */
++#define ELF32_M_SYM(info) ((info) >> 8)
++#define ELF32_M_SIZE(info) ((unsigned char) (info))
++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size))
++
++#define ELF64_M_SYM(info) ELF32_M_SYM (info)
++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info)
++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size)
++
++
++/* Motorola 68k specific definitions. */
++
++/* Values for Elf32_Ehdr.e_flags. */
++#define EF_CPU32 0x00810000
++
++/* m68k relocs. */
++
++#define R_68K_NONE 0 /* No reloc */
++#define R_68K_32 1 /* Direct 32 bit */
++#define R_68K_16 2 /* Direct 16 bit */
++#define R_68K_8 3 /* Direct 8 bit */
++#define R_68K_PC32 4 /* PC relative 32 bit */
++#define R_68K_PC16 5 /* PC relative 16 bit */
++#define R_68K_PC8 6 /* PC relative 8 bit */
++#define R_68K_GOT32 7 /* 32 bit PC relative GOT entry */
++#define R_68K_GOT16 8 /* 16 bit PC relative GOT entry */
++#define R_68K_GOT8 9 /* 8 bit PC relative GOT entry */
++#define R_68K_GOT32O 10 /* 32 bit GOT offset */
++#define R_68K_GOT16O 11 /* 16 bit GOT offset */
++#define R_68K_GOT8O 12 /* 8 bit GOT offset */
++#define R_68K_PLT32 13 /* 32 bit PC relative PLT address */
++#define R_68K_PLT16 14 /* 16 bit PC relative PLT address */
++#define R_68K_PLT8 15 /* 8 bit PC relative PLT address */
++#define R_68K_PLT32O 16 /* 32 bit PLT offset */
++#define R_68K_PLT16O 17 /* 16 bit PLT offset */
++#define R_68K_PLT8O 18 /* 8 bit PLT offset */
++#define R_68K_COPY 19 /* Copy symbol at runtime */
++#define R_68K_GLOB_DAT 20 /* Create GOT entry */
++#define R_68K_JMP_SLOT 21 /* Create PLT entry */
++#define R_68K_RELATIVE 22 /* Adjust by program base */
++/* Keep this the last entry. */
++#define R_68K_NUM 23
++
++/* Intel 80386 specific definitions. */
++
++/* i386 relocs. */
++
++#define R_386_NONE 0 /* No reloc */
++#define R_386_32 1 /* Direct 32 bit */
++#define R_386_PC32 2 /* PC relative 32 bit */
++#define R_386_GOT32 3 /* 32 bit GOT entry */
++#define R_386_PLT32 4 /* 32 bit PLT address */
++#define R_386_COPY 5 /* Copy symbol at runtime */
++#define R_386_GLOB_DAT 6 /* Create GOT entry */
++#define R_386_JMP_SLOT 7 /* Create PLT entry */
++#define R_386_RELATIVE 8 /* Adjust by program base */
++#define R_386_GOTOFF 9 /* 32 bit offset to GOT */
++#define R_386_GOTPC 10 /* 32 bit PC relative offset to GOT */
++#define R_386_32PLT 11
++#define R_386_TLS_TPOFF 14 /* Offset in static TLS block */
++#define R_386_TLS_IE 15 /* Address of GOT entry for static TLS
++ block offset */
++#define R_386_TLS_GOTIE 16 /* GOT entry for static TLS block
++ offset */
++#define R_386_TLS_LE 17 /* Offset relative to static TLS
++ block */
++#define R_386_TLS_GD 18 /* Direct 32 bit for GNU version of
++ general dynamic thread local data */
++#define R_386_TLS_LDM 19 /* Direct 32 bit for GNU version of
++ local dynamic thread local data
++ in LE code */
++#define R_386_16 20
++#define R_386_PC16 21
++#define R_386_8 22
++#define R_386_PC8 23
++#define R_386_TLS_GD_32 24 /* Direct 32 bit for general dynamic
++ thread local data */
++#define R_386_TLS_GD_PUSH 25 /* Tag for pushl in GD TLS code */
++#define R_386_TLS_GD_CALL 26 /* Relocation for call to
++ __tls_get_addr() */
++#define R_386_TLS_GD_POP 27 /* Tag for popl in GD TLS code */
++#define R_386_TLS_LDM_32 28 /* Direct 32 bit for local dynamic
++ thread local data in LE code */
++#define R_386_TLS_LDM_PUSH 29 /* Tag for pushl in LDM TLS code */
++#define R_386_TLS_LDM_CALL 30 /* Relocation for call to
++ __tls_get_addr() in LDM code */
++#define R_386_TLS_LDM_POP 31 /* Tag for popl in LDM TLS code */
++#define R_386_TLS_LDO_32 32 /* Offset relative to TLS block */
++#define R_386_TLS_IE_32 33 /* GOT entry for negated static TLS
++ block offset */
++#define R_386_TLS_LE_32 34 /* Negated offset relative to static
++ TLS block */
++#define R_386_TLS_DTPMOD32 35 /* ID of module containing symbol */
++#define R_386_TLS_DTPOFF32 36 /* Offset in TLS block */
++#define R_386_TLS_TPOFF32 37 /* Negated offset in static TLS block */
++/* Keep this the last entry. */
++#define R_386_NUM 38
++
++/* SUN SPARC specific definitions. */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type). */
++
++#define STT_REGISTER 13 /* Global register reserved to app. */
++
++/* Values for Elf64_Ehdr.e_flags. */
++
++#define EF_SPARCV9_MM 3
++#define EF_SPARCV9_TSO 0
++#define EF_SPARCV9_PSO 1
++#define EF_SPARCV9_RMO 2
++#define EF_SPARC_LEDATA 0x800000 /* little endian data */
++#define EF_SPARC_EXT_MASK 0xFFFF00
++#define EF_SPARC_32PLUS 0x000100 /* generic V8+ features */
++#define EF_SPARC_SUN_US1 0x000200 /* Sun UltraSPARC1 extensions */
++#define EF_SPARC_HAL_R1 0x000400 /* HAL R1 extensions */
++#define EF_SPARC_SUN_US3 0x000800 /* Sun UltraSPARCIII extensions */
++
++/* SPARC relocs. */
++
++#define R_SPARC_NONE 0 /* No reloc */
++#define R_SPARC_8 1 /* Direct 8 bit */
++#define R_SPARC_16 2 /* Direct 16 bit */
++#define R_SPARC_32 3 /* Direct 32 bit */
++#define R_SPARC_DISP8 4 /* PC relative 8 bit */
++#define R_SPARC_DISP16 5 /* PC relative 16 bit */
++#define R_SPARC_DISP32 6 /* PC relative 32 bit */
++#define R_SPARC_WDISP30 7 /* PC relative 30 bit shifted */
++#define R_SPARC_WDISP22 8 /* PC relative 22 bit shifted */
++#define R_SPARC_HI22 9 /* High 22 bit */
++#define R_SPARC_22 10 /* Direct 22 bit */
++#define R_SPARC_13 11 /* Direct 13 bit */
++#define R_SPARC_LO10 12 /* Truncated 10 bit */
++#define R_SPARC_GOT10 13 /* Truncated 10 bit GOT entry */
++#define R_SPARC_GOT13 14 /* 13 bit GOT entry */
++#define R_SPARC_GOT22 15 /* 22 bit GOT entry shifted */
++#define R_SPARC_PC10 16 /* PC relative 10 bit truncated */
++#define R_SPARC_PC22 17 /* PC relative 22 bit shifted */
++#define R_SPARC_WPLT30 18 /* 30 bit PC relative PLT address */
++#define R_SPARC_COPY 19 /* Copy symbol at runtime */
++#define R_SPARC_GLOB_DAT 20 /* Create GOT entry */
++#define R_SPARC_JMP_SLOT 21 /* Create PLT entry */
++#define R_SPARC_RELATIVE 22 /* Adjust by program base */
++#define R_SPARC_UA32 23 /* Direct 32 bit unaligned */
++
++/* Additional Sparc64 relocs. */
++
++#define R_SPARC_PLT32 24 /* Direct 32 bit ref to PLT entry */
++#define R_SPARC_HIPLT22 25 /* High 22 bit PLT entry */
++#define R_SPARC_LOPLT10 26 /* Truncated 10 bit PLT entry */
++#define R_SPARC_PCPLT32 27 /* PC rel 32 bit ref to PLT entry */
++#define R_SPARC_PCPLT22 28 /* PC rel high 22 bit PLT entry */
++#define R_SPARC_PCPLT10 29 /* PC rel trunc 10 bit PLT entry */
++#define R_SPARC_10 30 /* Direct 10 bit */
++#define R_SPARC_11 31 /* Direct 11 bit */
++#define R_SPARC_64 32 /* Direct 64 bit */
++#define R_SPARC_OLO10 33 /* 10bit with secondary 13bit addend */
++#define R_SPARC_HH22 34 /* Top 22 bits of direct 64 bit */
++#define R_SPARC_HM10 35 /* High middle 10 bits of ... */
++#define R_SPARC_LM22 36 /* Low middle 22 bits of ... */
++#define R_SPARC_PC_HH22 37 /* Top 22 bits of pc rel 64 bit */
++#define R_SPARC_PC_HM10 38 /* High middle 10 bit of ... */
++#define R_SPARC_PC_LM22 39 /* Low miggle 22 bits of ... */
++#define R_SPARC_WDISP16 40 /* PC relative 16 bit shifted */
++#define R_SPARC_WDISP19 41 /* PC relative 19 bit shifted */
++#define R_SPARC_7 43 /* Direct 7 bit */
++#define R_SPARC_5 44 /* Direct 5 bit */
++#define R_SPARC_6 45 /* Direct 6 bit */
++#define R_SPARC_DISP64 46 /* PC relative 64 bit */
++#define R_SPARC_PLT64 47 /* Direct 64 bit ref to PLT entry */
++#define R_SPARC_HIX22 48 /* High 22 bit complemented */
++#define R_SPARC_LOX10 49 /* Truncated 11 bit complemented */
++#define R_SPARC_H44 50 /* Direct high 12 of 44 bit */
++#define R_SPARC_M44 51 /* Direct mid 22 of 44 bit */
++#define R_SPARC_L44 52 /* Direct low 10 of 44 bit */
++#define R_SPARC_REGISTER 53 /* Global register usage */
++#define R_SPARC_UA64 54 /* Direct 64 bit unaligned */
++#define R_SPARC_UA16 55 /* Direct 16 bit unaligned */
++#define R_SPARC_TLS_GD_HI22 56
++#define R_SPARC_TLS_GD_LO10 57
++#define R_SPARC_TLS_GD_ADD 58
++#define R_SPARC_TLS_GD_CALL 59
++#define R_SPARC_TLS_LDM_HI22 60
++#define R_SPARC_TLS_LDM_LO10 61
++#define R_SPARC_TLS_LDM_ADD 62
++#define R_SPARC_TLS_LDM_CALL 63
++#define R_SPARC_TLS_LDO_HIX22 64
++#define R_SPARC_TLS_LDO_LOX10 65
++#define R_SPARC_TLS_LDO_ADD 66
++#define R_SPARC_TLS_IE_HI22 67
++#define R_SPARC_TLS_IE_LO10 68
++#define R_SPARC_TLS_IE_LD 69
++#define R_SPARC_TLS_IE_LDX 70
++#define R_SPARC_TLS_IE_ADD 71
++#define R_SPARC_TLS_LE_HIX22 72
++#define R_SPARC_TLS_LE_LOX10 73
++#define R_SPARC_TLS_DTPMOD32 74
++#define R_SPARC_TLS_DTPMOD64 75
++#define R_SPARC_TLS_DTPOFF32 76
++#define R_SPARC_TLS_DTPOFF64 77
++#define R_SPARC_TLS_TPOFF32 78
++#define R_SPARC_TLS_TPOFF64 79
++/* Keep this the last entry. */
++#define R_SPARC_NUM 80
++
++/* For Sparc64, legal values for d_tag of Elf64_Dyn. */
++
++#define DT_SPARC_REGISTER 0x70000001
++#define DT_SPARC_NUM 2
++
++/* Bits present in AT_HWCAP, primarily for Sparc32. */
++
++#define HWCAP_SPARC_FLUSH 1 /* The cpu supports flush insn. */
++#define HWCAP_SPARC_STBAR 2
++#define HWCAP_SPARC_SWAP 4
++#define HWCAP_SPARC_MULDIV 8
++#define HWCAP_SPARC_V9 16 /* The cpu is v9, so v8plus is ok. */
++#define HWCAP_SPARC_ULTRA3 32
++
++/* MIPS R3000 specific definitions. */
++
++/* Legal values for e_flags field of Elf32_Ehdr. */
++
++#define EF_MIPS_NOREORDER 1 /* A .noreorder directive was used */
++#define EF_MIPS_PIC 2 /* Contains PIC code */
++#define EF_MIPS_CPIC 4 /* Uses PIC calling sequence */
++#define EF_MIPS_XGOT 8
++#define EF_MIPS_64BIT_WHIRL 16
++#define EF_MIPS_ABI2 32
++#define EF_MIPS_ABI_ON32 64
++#define EF_MIPS_ARCH 0xf0000000 /* MIPS architecture level */
++
++/* Legal values for MIPS architecture level. */
++
++#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
++#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
++#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
++#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
++#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
++#define EF_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */
++#define EF_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */
++
++/* The following are non-official names and should not be used. */
++
++#define E_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
++#define E_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
++#define E_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
++#define E_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
++#define E_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
++#define E_MIPS_ARCH_32 0x60000000 /* MIPS32 code. */
++#define E_MIPS_ARCH_64 0x70000000 /* MIPS64 code. */
++
++/* Special section indices. */
++
++#define SHN_MIPS_ACOMMON 0xff00 /* Allocated common symbols */
++#define SHN_MIPS_TEXT 0xff01 /* Allocated test symbols. */
++#define SHN_MIPS_DATA 0xff02 /* Allocated data symbols. */
++#define SHN_MIPS_SCOMMON 0xff03 /* Small common symbols */
++#define SHN_MIPS_SUNDEFINED 0xff04 /* Small undefined symbols */
++
++/* Legal values for sh_type field of Elf32_Shdr. */
++
++#define SHT_MIPS_LIBLIST 0x70000000 /* Shared objects used in link */
++#define SHT_MIPS_MSYM 0x70000001
++#define SHT_MIPS_CONFLICT 0x70000002 /* Conflicting symbols */
++#define SHT_MIPS_GPTAB 0x70000003 /* Global data area sizes */
++#define SHT_MIPS_UCODE 0x70000004 /* Reserved for SGI/MIPS compilers */
++#define SHT_MIPS_DEBUG 0x70000005 /* MIPS ECOFF debugging information*/
++#define SHT_MIPS_REGINFO 0x70000006 /* Register usage information */
++#define SHT_MIPS_PACKAGE 0x70000007
++#define SHT_MIPS_PACKSYM 0x70000008
++#define SHT_MIPS_RELD 0x70000009
++#define SHT_MIPS_IFACE 0x7000000b
++#define SHT_MIPS_CONTENT 0x7000000c
++#define SHT_MIPS_OPTIONS 0x7000000d /* Miscellaneous options. */
++#define SHT_MIPS_SHDR 0x70000010
++#define SHT_MIPS_FDESC 0x70000011
++#define SHT_MIPS_EXTSYM 0x70000012
++#define SHT_MIPS_DENSE 0x70000013
++#define SHT_MIPS_PDESC 0x70000014
++#define SHT_MIPS_LOCSYM 0x70000015
++#define SHT_MIPS_AUXSYM 0x70000016
++#define SHT_MIPS_OPTSYM 0x70000017
++#define SHT_MIPS_LOCSTR 0x70000018
++#define SHT_MIPS_LINE 0x70000019
++#define SHT_MIPS_RFDESC 0x7000001a
++#define SHT_MIPS_DELTASYM 0x7000001b
++#define SHT_MIPS_DELTAINST 0x7000001c
++#define SHT_MIPS_DELTACLASS 0x7000001d
++#define SHT_MIPS_DWARF 0x7000001e /* DWARF debugging information. */
++#define SHT_MIPS_DELTADECL 0x7000001f
++#define SHT_MIPS_SYMBOL_LIB 0x70000020
++#define SHT_MIPS_EVENTS 0x70000021 /* Event section. */
++#define SHT_MIPS_TRANSLATE 0x70000022
++#define SHT_MIPS_PIXIE 0x70000023
++#define SHT_MIPS_XLATE 0x70000024
++#define SHT_MIPS_XLATE_DEBUG 0x70000025
++#define SHT_MIPS_WHIRL 0x70000026
++#define SHT_MIPS_EH_REGION 0x70000027
++#define SHT_MIPS_XLATE_OLD 0x70000028
++#define SHT_MIPS_PDR_EXCEPTION 0x70000029
++
++/* Legal values for sh_flags field of Elf32_Shdr. */
++
++#define SHF_MIPS_GPREL 0x10000000 /* Must be part of global data area */
++#define SHF_MIPS_MERGE 0x20000000
++#define SHF_MIPS_ADDR 0x40000000
++#define SHF_MIPS_STRINGS 0x80000000
++#define SHF_MIPS_NOSTRIP 0x08000000
++#define SHF_MIPS_LOCAL 0x04000000
++#define SHF_MIPS_NAMES 0x02000000
++#define SHF_MIPS_NODUPE 0x01000000
++
++
++/* Symbol tables. */
++
++/* MIPS specific values for `st_other'. */
++#define STO_MIPS_DEFAULT 0x0
++#define STO_MIPS_INTERNAL 0x1
++#define STO_MIPS_HIDDEN 0x2
++#define STO_MIPS_PROTECTED 0x3
++#define STO_MIPS_SC_ALIGN_UNUSED 0xff
++
++/* MIPS specific values for `st_info'. */
++#define STB_MIPS_SPLIT_COMMON 13
++
++/* Entries found in sections of type SHT_MIPS_GPTAB. */
++
++typedef union
++{
++ struct
++ {
++ Elf32_Word gt_current_g_value; /* -G value used for compilation */
++ Elf32_Word gt_unused; /* Not used */
++ } gt_header; /* First entry in section */
++ struct
++ {
++ Elf32_Word gt_g_value; /* If this value were used for -G */
++ Elf32_Word gt_bytes; /* This many bytes would be used */
++ } gt_entry; /* Subsequent entries in section */
++} Elf32_gptab;
++
++/* Entry found in sections of type SHT_MIPS_REGINFO. */
++
++typedef struct
++{
++ Elf32_Word ri_gprmask; /* General registers used */
++ Elf32_Word ri_cprmask[4]; /* Coprocessor registers used */
++ Elf32_Sword ri_gp_value; /* $gp register value */
++} Elf32_RegInfo;
++
++/* Entries found in sections of type SHT_MIPS_OPTIONS. */
++
++typedef struct
++{
++ unsigned char kind; /* Determines interpretation of the
++ variable part of descriptor. */
++ unsigned char size; /* Size of descriptor, including header. */
++ Elf32_Section section; /* Section header index of section affected,
++ 0 for global options. */
++ Elf32_Word info; /* Kind-specific information. */
++} Elf_Options;
++
++/* Values for `kind' field in Elf_Options. */
++
++#define ODK_NULL 0 /* Undefined. */
++#define ODK_REGINFO 1 /* Register usage information. */
++#define ODK_EXCEPTIONS 2 /* Exception processing options. */
++#define ODK_PAD 3 /* Section padding options. */
++#define ODK_HWPATCH 4 /* Hardware workarounds performed */
++#define ODK_FILL 5 /* record the fill value used by the linker. */
++#define ODK_TAGS 6 /* reserve space for desktop tools to write. */
++#define ODK_HWAND 7 /* HW workarounds. 'AND' bits when merging. */
++#define ODK_HWOR 8 /* HW workarounds. 'OR' bits when merging. */
++
++/* Values for `info' in Elf_Options for ODK_EXCEPTIONS entries. */
++
++#define OEX_FPU_MIN 0x1f /* FPE's which MUST be enabled. */
++#define OEX_FPU_MAX 0x1f00 /* FPE's which MAY be enabled. */
++#define OEX_PAGE0 0x10000 /* page zero must be mapped. */
++#define OEX_SMM 0x20000 /* Force sequential memory mode? */
++#define OEX_FPDBUG 0x40000 /* Force floating point debug mode? */
++#define OEX_PRECISEFP OEX_FPDBUG
++#define OEX_DISMISS 0x80000 /* Dismiss invalid address faults? */
++
++#define OEX_FPU_INVAL 0x10
++#define OEX_FPU_DIV0 0x08
++#define OEX_FPU_OFLO 0x04
++#define OEX_FPU_UFLO 0x02
++#define OEX_FPU_INEX 0x01
++
++/* Masks for `info' in Elf_Options for an ODK_HWPATCH entry. */
++
++#define OHW_R4KEOP 0x1 /* R4000 end-of-page patch. */
++#define OHW_R8KPFETCH 0x2 /* may need R8000 prefetch patch. */
++#define OHW_R5KEOP 0x4 /* R5000 end-of-page patch. */
++#define OHW_R5KCVTL 0x8 /* R5000 cvt.[ds].l bug. clean=1. */
++
++#define OPAD_PREFIX 0x1
++#define OPAD_POSTFIX 0x2
++#define OPAD_SYMBOL 0x4
++
++/* Entry found in `.options' section. */
++
++typedef struct
++{
++ Elf32_Word hwp_flags1; /* Extra flags. */
++ Elf32_Word hwp_flags2; /* Extra flags. */
++} Elf_Options_Hw;
++
++/* Masks for `info' in ElfOptions for ODK_HWAND and ODK_HWOR entries. */
++
++#define OHWA0_R4KEOP_CHECKED 0x00000001
++#define OHWA1_R4KEOP_CLEAN 0x00000002
++
++/* MIPS relocs. */
++
++#define R_MIPS_NONE 0 /* No reloc */
++#define R_MIPS_16 1 /* Direct 16 bit */
++#define R_MIPS_32 2 /* Direct 32 bit */
++#define R_MIPS_REL32 3 /* PC relative 32 bit */
++#define R_MIPS_26 4 /* Direct 26 bit shifted */
++#define R_MIPS_HI16 5 /* High 16 bit */
++#define R_MIPS_LO16 6 /* Low 16 bit */
++#define R_MIPS_GPREL16 7 /* GP relative 16 bit */
++#define R_MIPS_LITERAL 8 /* 16 bit literal entry */
++#define R_MIPS_GOT16 9 /* 16 bit GOT entry */
++#define R_MIPS_PC16 10 /* PC relative 16 bit */
++#define R_MIPS_CALL16 11 /* 16 bit GOT entry for function */
++#define R_MIPS_GPREL32 12 /* GP relative 32 bit */
++
++#define R_MIPS_SHIFT5 16
++#define R_MIPS_SHIFT6 17
++#define R_MIPS_64 18
++#define R_MIPS_GOT_DISP 19
++#define R_MIPS_GOT_PAGE 20
++#define R_MIPS_GOT_OFST 21
++#define R_MIPS_GOT_HI16 22
++#define R_MIPS_GOT_LO16 23
++#define R_MIPS_SUB 24
++#define R_MIPS_INSERT_A 25
++#define R_MIPS_INSERT_B 26
++#define R_MIPS_DELETE 27
++#define R_MIPS_HIGHER 28
++#define R_MIPS_HIGHEST 29
++#define R_MIPS_CALL_HI16 30
++#define R_MIPS_CALL_LO16 31
++#define R_MIPS_SCN_DISP 32
++#define R_MIPS_REL16 33
++#define R_MIPS_ADD_IMMEDIATE 34
++#define R_MIPS_PJUMP 35
++#define R_MIPS_RELGOT 36
++#define R_MIPS_JALR 37
++/* Keep this the last entry. */
++#define R_MIPS_NUM 38
++
++/* Legal values for p_type field of Elf32_Phdr. */
++
++#define PT_MIPS_REGINFO 0x70000000 /* Register usage information */
++#define PT_MIPS_RTPROC 0x70000001 /* Runtime procedure table. */
++#define PT_MIPS_OPTIONS 0x70000002
++
++/* Special program header types. */
++
++#define PF_MIPS_LOCAL 0x10000000
++
++/* Legal values for d_tag field of Elf32_Dyn. */
++
++#define DT_MIPS_RLD_VERSION 0x70000001 /* Runtime linker interface version */
++#define DT_MIPS_TIME_STAMP 0x70000002 /* Timestamp */
++#define DT_MIPS_ICHECKSUM 0x70000003 /* Checksum */
++#define DT_MIPS_IVERSION 0x70000004 /* Version string (string tbl index) */
++#define DT_MIPS_FLAGS 0x70000005 /* Flags */
++#define DT_MIPS_BASE_ADDRESS 0x70000006 /* Base address */
++#define DT_MIPS_MSYM 0x70000007
++#define DT_MIPS_CONFLICT 0x70000008 /* Address of CONFLICT section */
++#define DT_MIPS_LIBLIST 0x70000009 /* Address of LIBLIST section */
++#define DT_MIPS_LOCAL_GOTNO 0x7000000a /* Number of local GOT entries */
++#define DT_MIPS_CONFLICTNO 0x7000000b /* Number of CONFLICT entries */
++#define DT_MIPS_LIBLISTNO 0x70000010 /* Number of LIBLIST entries */
++#define DT_MIPS_SYMTABNO 0x70000011 /* Number of DYNSYM entries */
++#define DT_MIPS_UNREFEXTNO 0x70000012 /* First external DYNSYM */
++#define DT_MIPS_GOTSYM 0x70000013 /* First GOT entry in DYNSYM */
++#define DT_MIPS_HIPAGENO 0x70000014 /* Number of GOT page table entries */
++#define DT_MIPS_RLD_MAP 0x70000016 /* Address of run time loader map. */
++#define DT_MIPS_DELTA_CLASS 0x70000017 /* Delta C++ class definition. */
++#define DT_MIPS_DELTA_CLASS_NO 0x70000018 /* Number of entries in
++ DT_MIPS_DELTA_CLASS. */
++#define DT_MIPS_DELTA_INSTANCE 0x70000019 /* Delta C++ class instances. */
++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a /* Number of entries in
++ DT_MIPS_DELTA_INSTANCE. */
++#define DT_MIPS_DELTA_RELOC 0x7000001b /* Delta relocations. */
++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c /* Number of entries in
++ DT_MIPS_DELTA_RELOC. */
++#define DT_MIPS_DELTA_SYM 0x7000001d /* Delta symbols that Delta
++ relocations refer to. */
++#define DT_MIPS_DELTA_SYM_NO 0x7000001e /* Number of entries in
++ DT_MIPS_DELTA_SYM. */
++#define DT_MIPS_DELTA_CLASSSYM 0x70000020 /* Delta symbols that hold the
++ class declaration. */
++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021 /* Number of entries in
++ DT_MIPS_DELTA_CLASSSYM. */
++#define DT_MIPS_CXX_FLAGS 0x70000022 /* Flags indicating for C++ flavor. */
++#define DT_MIPS_PIXIE_INIT 0x70000023
++#define DT_MIPS_SYMBOL_LIB 0x70000024
++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025
++#define DT_MIPS_LOCAL_GOTIDX 0x70000026
++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027
++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028
++#define DT_MIPS_OPTIONS 0x70000029 /* Address of .options. */
++#define DT_MIPS_INTERFACE 0x7000002a /* Address of .interface. */
++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b
++#define DT_MIPS_INTERFACE_SIZE 0x7000002c /* Size of the .interface section. */
++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d /* Address of rld_text_rsolve
++ function stored in GOT. */
++#define DT_MIPS_PERF_SUFFIX 0x7000002e /* Default suffix of dso to be added
++ by rld on dlopen() calls. */
++#define DT_MIPS_COMPACT_SIZE 0x7000002f /* (O32)Size of compact rel section. */
++#define DT_MIPS_GP_VALUE 0x70000030 /* GP value for aux GOTs. */
++#define DT_MIPS_AUX_DYNAMIC 0x70000031 /* Address of aux .dynamic. */
++#define DT_MIPS_NUM 0x32
++
++/* Legal values for DT_MIPS_FLAGS Elf32_Dyn entry. */
++
++#define RHF_NONE 0 /* No flags */
++#define RHF_QUICKSTART (1 << 0) /* Use quickstart */
++#define RHF_NOTPOT (1 << 1) /* Hash size not power of 2 */
++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2) /* Ignore LD_LIBRARY_PATH */
++#define RHF_NO_MOVE (1 << 3)
++#define RHF_SGI_ONLY (1 << 4)
++#define RHF_GUARANTEE_INIT (1 << 5)
++#define RHF_DELTA_C_PLUS_PLUS (1 << 6)
++#define RHF_GUARANTEE_START_INIT (1 << 7)
++#define RHF_PIXIE (1 << 8)
++#define RHF_DEFAULT_DELAY_LOAD (1 << 9)
++#define RHF_REQUICKSTART (1 << 10)
++#define RHF_REQUICKSTARTED (1 << 11)
++#define RHF_CORD (1 << 12)
++#define RHF_NO_UNRES_UNDEF (1 << 13)
++#define RHF_RLD_ORDER_SAFE (1 << 14)
++
++/* Entries found in sections of type SHT_MIPS_LIBLIST. */
++
++typedef struct
++{
++ Elf32_Word l_name; /* Name (string table index) */
++ Elf32_Word l_time_stamp; /* Timestamp */
++ Elf32_Word l_checksum; /* Checksum */
++ Elf32_Word l_version; /* Interface version */
++ Elf32_Word l_flags; /* Flags */
++} Elf32_Lib;
++
++typedef struct
++{
++ Elf64_Word l_name; /* Name (string table index) */
++ Elf64_Word l_time_stamp; /* Timestamp */
++ Elf64_Word l_checksum; /* Checksum */
++ Elf64_Word l_version; /* Interface version */
++ Elf64_Word l_flags; /* Flags */
++} Elf64_Lib;
++
++
++/* Legal values for l_flags. */
++
++#define LL_NONE 0
++#define LL_EXACT_MATCH (1 << 0) /* Require exact match */
++#define LL_IGNORE_INT_VER (1 << 1) /* Ignore interface version */
++#define LL_REQUIRE_MINOR (1 << 2)
++#define LL_EXPORTS (1 << 3)
++#define LL_DELAY_LOAD (1 << 4)
++#define LL_DELTA (1 << 5)
++
++/* Entries found in sections of type SHT_MIPS_CONFLICT. */
++
++typedef Elf32_Addr Elf32_Conflict;
++
++
++/* HPPA specific definitions. */
++
++/* Legal values for e_flags field of Elf32_Ehdr. */
++
++#define EF_PARISC_TRAPNIL 0x00010000 /* Trap nil pointer dereference. */
++#define EF_PARISC_EXT 0x00020000 /* Program uses arch. extensions. */
++#define EF_PARISC_LSB 0x00040000 /* Program expects little endian. */
++#define EF_PARISC_WIDE 0x00080000 /* Program expects wide mode. */
++#define EF_PARISC_NO_KABP 0x00100000 /* No kernel assisted branch
++ prediction. */
++#define EF_PARISC_LAZYSWAP 0x00400000 /* Allow lazy swapping. */
++#define EF_PARISC_ARCH 0x0000ffff /* Architecture version. */
++
++/* Defined values for `e_flags & EF_PARISC_ARCH' are: */
++
++#define EFA_PARISC_1_0 0x020b /* PA-RISC 1.0 big-endian. */
++#define EFA_PARISC_1_1 0x0210 /* PA-RISC 1.1 big-endian. */
++#define EFA_PARISC_2_0 0x0214 /* PA-RISC 2.0 big-endian. */
++
++/* Additional section indices. */
++
++#define SHN_PARISC_ANSI_COMMON 0xff00 /* Section for tentatively declared
++ symbols in ANSI C. */
++#define SHN_PARISC_HUGE_COMMON 0xff01 /* Common blocks in huge model. */
++
++/* Legal values for sh_type field of Elf32_Shdr. */
++
++#define SHT_PARISC_EXT 0x70000000 /* Contains product specific ext. */
++#define SHT_PARISC_UNWIND 0x70000001 /* Unwind information. */
++#define SHT_PARISC_DOC 0x70000002 /* Debug info for optimized code. */
++
++/* Legal values for sh_flags field of Elf32_Shdr. */
++
++#define SHF_PARISC_SHORT 0x20000000 /* Section with short addressing. */
++#define SHF_PARISC_HUGE 0x40000000 /* Section far from gp. */
++#define SHF_PARISC_SBP 0x80000000 /* Static branch prediction code. */
++
++/* Legal values for ST_TYPE subfield of st_info (symbol type). */
++
++#define STT_PARISC_MILLICODE 13 /* Millicode function entry point. */
++
++#define STT_HP_OPAQUE (STT_LOOS + 0x1)
++#define STT_HP_STUB (STT_LOOS + 0x2)
++
++/* HPPA relocs. */
++
++#define R_PARISC_NONE 0 /* No reloc. */
++#define R_PARISC_DIR32 1 /* Direct 32-bit reference. */
++#define R_PARISC_DIR21L 2 /* Left 21 bits of eff. address. */
++#define R_PARISC_DIR17R 3 /* Right 17 bits of eff. address. */
++#define R_PARISC_DIR17F 4 /* 17 bits of eff. address. */
++#define R_PARISC_DIR14R 6 /* Right 14 bits of eff. address. */
++#define R_PARISC_PCREL32 9 /* 32-bit rel. address. */
++#define R_PARISC_PCREL21L 10 /* Left 21 bits of rel. address. */
++#define R_PARISC_PCREL17R 11 /* Right 17 bits of rel. address. */
++#define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */
++#define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */
++#define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */
++#define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */
++#define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */
++#define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */
++#define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */
++#define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */
++#define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */
++#define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */
++#define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */
++#define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */
++#define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. */
++#define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */
++#define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */
++#define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */
++#define R_PARISC_FPTR64 64 /* 64 bits function address. */
++#define R_PARISC_PLABEL32 65 /* 32 bits function address. */
++#define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */
++#define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */
++#define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */
++#define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */
++#define R_PARISC_PCREL16F 77 /* 16 bits PC-rel. address. */
++#define R_PARISC_PCREL16WF 78 /* 16 bits PC-rel. address. */
++#define R_PARISC_PCREL16DF 79 /* 16 bits PC-rel. address. */
++#define R_PARISC_DIR64 80 /* 64 bits of eff. address. */
++#define R_PARISC_DIR14WR 83 /* 14 bits of eff. address. */
++#define R_PARISC_DIR14DR 84 /* 14 bits of eff. address. */
++#define R_PARISC_DIR16F 85 /* 16 bits of eff. address. */
++#define R_PARISC_DIR16WF 86 /* 16 bits of eff. address. */
++#define R_PARISC_DIR16DF 87 /* 16 bits of eff. address. */
++#define R_PARISC_GPREL64 88 /* 64 bits of GP-rel. address. */
++#define R_PARISC_GPREL14WR 91 /* GP-rel. address, right 14 bits. */
++#define R_PARISC_GPREL14DR 92 /* GP-rel. address, right 14 bits. */
++#define R_PARISC_GPREL16F 93 /* 16 bits GP-rel. address. */
++#define R_PARISC_GPREL16WF 94 /* 16 bits GP-rel. address. */
++#define R_PARISC_GPREL16DF 95 /* 16 bits GP-rel. address. */
++#define R_PARISC_LTOFF64 96 /* 64 bits LT-rel. address. */
++#define R_PARISC_LTOFF14WR 99 /* LT-rel. address, right 14 bits. */
++#define R_PARISC_LTOFF14DR 100 /* LT-rel. address, right 14 bits. */
++#define R_PARISC_LTOFF16F 101 /* 16 bits LT-rel. address. */
++#define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */
++#define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */
++#define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */
++#define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */
++#define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */
++#define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */
++#define R_PARISC_PLTOFF16F 117 /* 16 bits PLT-rel. address. */
++#define R_PARISC_PLTOFF16WF 118 /* 16 bits PLT-rel. address. */
++#define R_PARISC_PLTOFF16DF 119 /* 16 bits PLT-rel. address. */
++#define R_PARISC_LTOFF_FPTR64 120 /* 64 bits LT-rel. function ptr. */
++#define R_PARISC_LTOFF_FPTR14WR 123 /* LT-rel. fct. ptr., right 14 bits. */
++#define R_PARISC_LTOFF_FPTR14DR 124 /* LT-rel. fct. ptr., right 14 bits. */
++#define R_PARISC_LTOFF_FPTR16F 125 /* 16 bits LT-rel. function ptr. */
++#define R_PARISC_LTOFF_FPTR16WF 126 /* 16 bits LT-rel. function ptr. */
++#define R_PARISC_LTOFF_FPTR16DF 127 /* 16 bits LT-rel. function ptr. */
++#define R_PARISC_LORESERVE 128
++#define R_PARISC_COPY 128 /* Copy relocation. */
++#define R_PARISC_IPLT 129 /* Dynamic reloc, imported PLT */
++#define R_PARISC_EPLT 130 /* Dynamic reloc, exported PLT */
++#define R_PARISC_TPREL32 153 /* 32 bits TP-rel. address. */
++#define R_PARISC_TPREL21L 154 /* TP-rel. address, left 21 bits. */
++#define R_PARISC_TPREL14R 158 /* TP-rel. address, right 14 bits. */
++#define R_PARISC_LTOFF_TP21L 162 /* LT-TP-rel. address, left 21 bits. */
++#define R_PARISC_LTOFF_TP14R 166 /* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP14F 167 /* 14 bits LT-TP-rel. address. */
++#define R_PARISC_TPREL64 216 /* 64 bits TP-rel. address. */
++#define R_PARISC_TPREL14WR 219 /* TP-rel. address, right 14 bits. */
++#define R_PARISC_TPREL14DR 220 /* TP-rel. address, right 14 bits. */
++#define R_PARISC_TPREL16F 221 /* 16 bits TP-rel. address. */
++#define R_PARISC_TPREL16WF 222 /* 16 bits TP-rel. address. */
++#define R_PARISC_TPREL16DF 223 /* 16 bits TP-rel. address. */
++#define R_PARISC_LTOFF_TP64 224 /* 64 bits LT-TP-rel. address. */
++#define R_PARISC_LTOFF_TP14WR 227 /* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP14DR 228 /* LT-TP-rel. address, right 14 bits.*/
++#define R_PARISC_LTOFF_TP16F 229 /* 16 bits LT-TP-rel. address. */
++#define R_PARISC_LTOFF_TP16WF 230 /* 16 bits LT-TP-rel. address. */
++#define R_PARISC_LTOFF_TP16DF 231 /* 16 bits LT-TP-rel. address. */
++#define R_PARISC_HIRESERVE 255
++
++/* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */
++
++#define PT_HP_TLS (PT_LOOS + 0x0)
++#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
++#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
++#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
++#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
++#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
++#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
++#define PT_HP_PARALLEL (PT_LOOS + 0x10)
++#define PT_HP_FASTBIND (PT_LOOS + 0x11)
++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
++#define PT_HP_STACK (PT_LOOS + 0x14)
++
++#define PT_PARISC_ARCHEXT 0x70000000
++#define PT_PARISC_UNWIND 0x70000001
++
++/* Legal values for p_flags field of Elf32_Phdr/Elf64_Phdr. */
++
++#define PF_PARISC_SBP 0x08000000
++
++#define PF_HP_PAGE_SIZE 0x00100000
++#define PF_HP_FAR_SHARED 0x00200000
++#define PF_HP_NEAR_SHARED 0x00400000
++#define PF_HP_CODE 0x01000000
++#define PF_HP_MODIFY 0x02000000
++#define PF_HP_LAZYSWAP 0x04000000
++#define PF_HP_SBP 0x08000000
++
++
++/* Alpha specific definitions. */
++
++/* Legal values for e_flags field of Elf64_Ehdr. */
++
++#define EF_ALPHA_32BIT 1 /* All addresses must be < 2GB. */
++#define EF_ALPHA_CANRELAX 2 /* Relocations for relaxing exist. */
++
++/* Legal values for sh_type field of Elf64_Shdr. */
++
++/* These two are primarily concerned with ECOFF debugging info. */
++#define SHT_ALPHA_DEBUG 0x70000001
++#define SHT_ALPHA_REGINFO 0x70000002
++
++/* Legal values for sh_flags field of Elf64_Shdr. */
++
++#define SHF_ALPHA_GPREL 0x10000000
++
++/* Legal values for st_other field of Elf64_Sym. */
++#define STO_ALPHA_NOPV 0x80 /* No PV required. */
++#define STO_ALPHA_STD_GPLOAD 0x88 /* PV only used for initial ldgp. */
++
++/* Alpha relocs. */
++
++#define R_ALPHA_NONE 0 /* No reloc */
++#define R_ALPHA_REFLONG 1 /* Direct 32 bit */
++#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */
++#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */
++#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */
++#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */
++#define R_ALPHA_GPDISP 6 /* Add displacement to GP */
++#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */
++#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */
++#define R_ALPHA_SREL16 9 /* PC relative 16 bit */
++#define R_ALPHA_SREL32 10 /* PC relative 32 bit */
++#define R_ALPHA_SREL64 11 /* PC relative 64 bit */
++#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */
++#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */
++#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */
++#define R_ALPHA_COPY 24 /* Copy symbol at runtime */
++#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
++#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
++#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
++#define R_ALPHA_TLS_GD_HI 28
++#define R_ALPHA_TLSGD 29
++#define R_ALPHA_TLS_LDM 30
++#define R_ALPHA_DTPMOD64 31
++#define R_ALPHA_GOTDTPREL 32
++#define R_ALPHA_DTPREL64 33
++#define R_ALPHA_DTPRELHI 34
++#define R_ALPHA_DTPRELLO 35
++#define R_ALPHA_DTPREL16 36
++#define R_ALPHA_GOTTPREL 37
++#define R_ALPHA_TPREL64 38
++#define R_ALPHA_TPRELHI 39
++#define R_ALPHA_TPRELLO 40
++#define R_ALPHA_TPREL16 41
++/* Keep this the last entry. */
++#define R_ALPHA_NUM 46
++
++/* Magic values of the LITUSE relocation addend. */
++#define LITUSE_ALPHA_ADDR 0
++#define LITUSE_ALPHA_BASE 1
++#define LITUSE_ALPHA_BYTOFF 2
++#define LITUSE_ALPHA_JSR 3
++#define LITUSE_ALPHA_TLS_GD 4
++#define LITUSE_ALPHA_TLS_LDM 5
++
++
++/* PowerPC specific declarations */
++
++/* Values for Elf32/64_Ehdr.e_flags. */
++#define EF_PPC_EMB 0x80000000 /* PowerPC embedded flag */
++
++/* Cygnus local bits below */
++#define EF_PPC_RELOCATABLE 0x00010000 /* PowerPC -mrelocatable flag*/
++#define EF_PPC_RELOCATABLE_LIB 0x00008000 /* PowerPC -mrelocatable-lib
++ flag */
++
++/* PowerPC relocations defined by the ABIs */
++#define R_PPC_NONE 0
++#define R_PPC_ADDR32 1 /* 32bit absolute address */
++#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
++#define R_PPC_ADDR16 3 /* 16bit absolute address */
++#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
++#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
++#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
++#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
++#define R_PPC_ADDR14_BRTAKEN 8
++#define R_PPC_ADDR14_BRNTAKEN 9
++#define R_PPC_REL24 10 /* PC relative 26 bit */
++#define R_PPC_REL14 11 /* PC relative 16 bit */
++#define R_PPC_REL14_BRTAKEN 12
++#define R_PPC_REL14_BRNTAKEN 13
++#define R_PPC_GOT16 14
++#define R_PPC_GOT16_LO 15
++#define R_PPC_GOT16_HI 16
++#define R_PPC_GOT16_HA 17
++#define R_PPC_PLTREL24 18
++#define R_PPC_COPY 19
++#define R_PPC_GLOB_DAT 20
++#define R_PPC_JMP_SLOT 21
++#define R_PPC_RELATIVE 22
++#define R_PPC_LOCAL24PC 23
++#define R_PPC_UADDR32 24
++#define R_PPC_UADDR16 25
++#define R_PPC_REL32 26
++#define R_PPC_PLT32 27
++#define R_PPC_PLTREL32 28
++#define R_PPC_PLT16_LO 29
++#define R_PPC_PLT16_HI 30
++#define R_PPC_PLT16_HA 31
++#define R_PPC_SDAREL16 32
++#define R_PPC_SECTOFF 33
++#define R_PPC_SECTOFF_LO 34
++#define R_PPC_SECTOFF_HI 35
++#define R_PPC_SECTOFF_HA 36
++
++/* PowerPC relocations defined for the TLS access ABI. */
++#define R_PPC_TLS 67 /* none (sym+add)@tls */
++#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
++#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
++#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
++#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
++#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
++#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
++#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
++#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
++#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
++#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
++#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
++#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
++#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
++#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
++#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
++#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
++#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
++#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
++#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
++#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
++#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
++#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
++#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
++#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
++#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
++#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
++#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
++
++/* Keep this the last entry. */
++#define R_PPC_NUM 95
++
++/* The remaining relocs are from the Embedded ELF ABI, and are not
++ in the SVR4 ELF ABI. */
++#define R_PPC_EMB_NADDR32 101
++#define R_PPC_EMB_NADDR16 102
++#define R_PPC_EMB_NADDR16_LO 103
++#define R_PPC_EMB_NADDR16_HI 104
++#define R_PPC_EMB_NADDR16_HA 105
++#define R_PPC_EMB_SDAI16 106
++#define R_PPC_EMB_SDA2I16 107
++#define R_PPC_EMB_SDA2REL 108
++#define R_PPC_EMB_SDA21 109 /* 16 bit offset in SDA */
++#define R_PPC_EMB_MRKREF 110
++#define R_PPC_EMB_RELSEC16 111
++#define R_PPC_EMB_RELST_LO 112
++#define R_PPC_EMB_RELST_HI 113
++#define R_PPC_EMB_RELST_HA 114
++#define R_PPC_EMB_BIT_FLD 115
++#define R_PPC_EMB_RELSDA 116 /* 16 bit relative offset in SDA */
++
++/* Diab tool relocations. */
++#define R_PPC_DIAB_SDA21_LO 180 /* like EMB_SDA21, but lower 16 bit */
++#define R_PPC_DIAB_SDA21_HI 181 /* like EMB_SDA21, but high 16 bit */
++#define R_PPC_DIAB_SDA21_HA 182 /* like EMB_SDA21, adjusted high 16 */
++#define R_PPC_DIAB_RELSDA_LO 183 /* like EMB_RELSDA, but lower 16 bit */
++#define R_PPC_DIAB_RELSDA_HI 184 /* like EMB_RELSDA, but high 16 bit */
++#define R_PPC_DIAB_RELSDA_HA 185 /* like EMB_RELSDA, adjusted high 16 */
++
++/* This is a phony reloc to handle any old fashioned TOC16 references
++ that may still be in object files. */
++#define R_PPC_TOC16 255
++
++
++/* PowerPC64 relocations defined by the ABIs */
++#define R_PPC64_NONE R_PPC_NONE
++#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address */
++#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned */
++#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address */
++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of address */
++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of address. */
++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
++#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned */
++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
++#define R_PPC64_REL24 R_PPC_REL24 /* PC-rel. 26 bit, word aligned */
++#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit */
++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
++#define R_PPC64_GOT16 R_PPC_GOT16
++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
++
++#define R_PPC64_COPY R_PPC_COPY
++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
++#define R_PPC64_RELATIVE R_PPC_RELATIVE
++
++#define R_PPC64_UADDR32 R_PPC_UADDR32
++#define R_PPC64_UADDR16 R_PPC_UADDR16
++#define R_PPC64_REL32 R_PPC_REL32
++#define R_PPC64_PLT32 R_PPC_PLT32
++#define R_PPC64_PLTREL32 R_PPC_PLTREL32
++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
++
++#define R_PPC64_SECTOFF R_PPC_SECTOFF
++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
++#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2 */
++#define R_PPC64_ADDR64 38 /* doubleword64 S + A */
++#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A) */
++#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A) */
++#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A) */
++#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A) */
++#define R_PPC64_UADDR64 43 /* doubleword64 S + A */
++#define R_PPC64_REL64 44 /* doubleword64 S + A - P */
++#define R_PPC64_PLT64 45 /* doubleword64 L + A */
++#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P */
++#define R_PPC64_TOC16 47 /* half16* S + A - .TOC */
++#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.) */
++#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.) */
++#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.) */
++#define R_PPC64_TOC 51 /* doubleword64 .TOC */
++#define R_PPC64_PLTGOT16 52 /* half16* M + A */
++#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A) */
++#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A) */
++#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A) */
++
++#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2 */
++#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2 */
++#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2 */
++#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2 */
++#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2 */
++#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2 */
++#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2 */
++#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2 */
++#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2 */
++#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2 */
++#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2 */
++
++/* PowerPC64 relocations defined for the TLS access ABI. */
++#define R_PPC64_TLS 67 /* none (sym+add)@tls */
++#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
++#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
++#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
++#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
++#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
++#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
++#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
++#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
++#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
++#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
++#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
++#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
++#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
++#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
++#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
++#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
++#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
++#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
++#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
++#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
++#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
++#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
++#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
++#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
++#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
++#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
++#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
++#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
++#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
++#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
++#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
++#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
++#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
++#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
++#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
++#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
++#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
++#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
++#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
++
++/* Keep this the last entry. */
++#define R_PPC64_NUM 107
++
++/* PowerPC64 specific values for the Dyn d_tag field. */
++#define DT_PPC64_GLINK (DT_LOPROC + 0)
++#define DT_PPC64_NUM 1
++
++
++/* ARM specific declarations */
++
++/* Processor specific flags for the ELF header e_flags field. */
++#define EF_ARM_RELEXEC 0x01
++#define EF_ARM_HASENTRY 0x02
++#define EF_ARM_INTERWORK 0x04
++#define EF_ARM_APCS_26 0x08
++#define EF_ARM_APCS_FLOAT 0x10
++#define EF_ARM_PIC 0x20
++#define EF_ARM_ALIGN8 0x40 /* 8-bit structure alignment is in use */
++#define EF_ARM_NEW_ABI 0x80
++#define EF_ARM_OLD_ABI 0x100
++
++/* Other constants defined in the ARM ELF spec. version B-01. */
++/* NB. These conflict with values defined above. */
++#define EF_ARM_SYMSARESORTED 0x04
++#define EF_ARM_DYNSYMSUSESEGIDX 0x08
++#define EF_ARM_MAPSYMSFIRST 0x10
++#define EF_ARM_EABIMASK 0XFF000000
++
++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
++#define EF_ARM_EABI_UNKNOWN 0x00000000
++#define EF_ARM_EABI_VER1 0x01000000
++#define EF_ARM_EABI_VER2 0x02000000
++
++/* Additional symbol types for Thumb */
++#define STT_ARM_TFUNC 0xd
++
++/* ARM-specific values for sh_flags */
++#define SHF_ARM_ENTRYSECT 0x10000000 /* Section contains an entry point */
++#define SHF_ARM_COMDEF 0x80000000 /* Section may be multiply defined
++ in the input to a link step */
++
++/* ARM-specific program header flags */
++#define PF_ARM_SB 0x10000000 /* Segment contains the location
++ addressed by the static base */
++
++/* ARM relocs. */
++#define R_ARM_NONE 0 /* No reloc */
++#define R_ARM_PC24 1 /* PC relative 26 bit branch */
++#define R_ARM_ABS32 2 /* Direct 32 bit */
++#define R_ARM_REL32 3 /* PC relative 32 bit */
++#define R_ARM_PC13 4
++#define R_ARM_ABS16 5 /* Direct 16 bit */
++#define R_ARM_ABS12 6 /* Direct 12 bit */
++#define R_ARM_THM_ABS5 7
++#define R_ARM_ABS8 8 /* Direct 8 bit */
++#define R_ARM_SBREL32 9
++#define R_ARM_THM_PC22 10
++#define R_ARM_THM_PC8 11
++#define R_ARM_AMP_VCALL9 12
++#define R_ARM_SWI24 13
++#define R_ARM_THM_SWI8 14
++#define R_ARM_XPC25 15
++#define R_ARM_THM_XPC22 16
++#define R_ARM_COPY 20 /* Copy symbol at runtime */
++#define R_ARM_GLOB_DAT 21 /* Create GOT entry */
++#define R_ARM_JUMP_SLOT 22 /* Create PLT entry */
++#define R_ARM_RELATIVE 23 /* Adjust by program base */
++#define R_ARM_GOTOFF 24 /* 32 bit offset to GOT */
++#define R_ARM_GOTPC 25 /* 32 bit PC relative offset to GOT */
++#define R_ARM_GOT32 26 /* 32 bit GOT entry */
++#define R_ARM_PLT32 27 /* 32 bit PLT address */
++#define R_ARM_ALU_PCREL_7_0 32
++#define R_ARM_ALU_PCREL_15_8 33
++#define R_ARM_ALU_PCREL_23_15 34
++#define R_ARM_LDR_SBREL_11_0 35
++#define R_ARM_ALU_SBREL_19_12 36
++#define R_ARM_ALU_SBREL_27_20 37
++#define R_ARM_GNU_VTENTRY 100
++#define R_ARM_GNU_VTINHERIT 101
++#define R_ARM_THM_PC11 102 /* thumb unconditional branch */
++#define R_ARM_THM_PC9 103 /* thumb conditional branch */
++#define R_ARM_RXPC25 249
++#define R_ARM_RSBREL32 250
++#define R_ARM_THM_RPC22 251
++#define R_ARM_RREL32 252
++#define R_ARM_RABS22 253
++#define R_ARM_RPC24 254
++#define R_ARM_RBASE 255
++/* Keep this the last entry. */
++#define R_ARM_NUM 256
++
++/* IA-64 specific declarations. */
++
++/* Processor specific flags for the Ehdr e_flags field. */
++#define EF_IA_64_MASKOS 0x0000000f /* os-specific flags */
++#define EF_IA_64_ABI64 0x00000010 /* 64-bit ABI */
++#define EF_IA_64_ARCH 0xff000000 /* arch. version mask */
++
++/* Processor specific values for the Phdr p_type field. */
++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0) /* arch extension bits */
++#define PT_IA_64_UNWIND (PT_LOPROC + 1) /* ia64 unwind bits */
++
++/* Processor specific flags for the Phdr p_flags field. */
++#define PF_IA_64_NORECOV 0x80000000 /* spec insns w/o recovery */
++
++/* Processor specific values for the Shdr sh_type field. */
++#define SHT_IA_64_EXT (SHT_LOPROC + 0) /* extension bits */
++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1) /* unwind bits */
++
++/* Processor specific flags for the Shdr sh_flags field. */
++#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
++#define SHF_IA_64_NORECOV 0x20000000 /* spec insns w/o recovery */
++
++/* Processor specific values for the Dyn d_tag field. */
++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
++#define DT_IA_64_NUM 1
++
++/* IA-64 relocations. */
++#define R_IA64_NONE 0x00 /* none */
++#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
++#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
++#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
++#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
++#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
++#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
++#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
++#define R_IA64_GPREL22 0x2a /* @gprel(sym + add), add imm22 */
++#define R_IA64_GPREL64I 0x2b /* @gprel(sym + add), mov imm64 */
++#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym + add), data4 MSB */
++#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym + add), data4 LSB */
++#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym + add), data8 MSB */
++#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF22 0x32 /* @ltoff(sym + add), add imm22 */
++#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym + add), mov imm64 */
++#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym + add), add imm22 */
++#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym + add), mov imm64 */
++#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym + add), data8 MSB */
++#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym + add), data8 LSB */
++#define R_IA64_FPTR64I 0x43 /* @fptr(sym + add), mov imm64 */
++#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym + add), data4 MSB */
++#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym + add), data4 LSB */
++#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym + add), data8 MSB */
++#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym + add), data8 LSB */
++#define R_IA64_PCREL60B 0x48 /* @pcrel(sym + add), brl */
++#define R_IA64_PCREL21B 0x49 /* @pcrel(sym + add), ptb, call */
++#define R_IA64_PCREL21M 0x4a /* @pcrel(sym + add), chk.s */
++#define R_IA64_PCREL21F 0x4b /* @pcrel(sym + add), fchkf */
++#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym + add), data4 MSB */
++#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym + add), data4 LSB */
++#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym + add), data8 MSB */
++#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
++#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
++#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), data4 MSB */
++#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), data4 LSB */
++#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), data8 MSB */
++#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), data8 LSB */
++#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym + add), data4 MSB */
++#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym + add), data4 LSB */
++#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym + add), data8 MSB */
++#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym + add), data8 LSB */
++#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym + add), data4 MSB */
++#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym + add), data4 LSB */
++#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym + add), data8 MSB */
++#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym + add), data8 LSB */
++#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
++#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
++#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
++#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
++#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
++#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
++#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
++#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
++#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym + add), 21bit inst */
++#define R_IA64_PCREL22 0x7a /* @pcrel(sym + add), 22bit inst */
++#define R_IA64_PCREL64I 0x7b /* @pcrel(sym + add), 64bit inst */
++#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
++#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
++#define R_IA64_COPY 0x84 /* copy relocation */
++#define R_IA64_SUB 0x85 /* Addend and symbol difference */
++#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
++#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
++#define R_IA64_TPREL14 0x91 /* @tprel(sym + add), imm14 */
++#define R_IA64_TPREL22 0x92 /* @tprel(sym + add), imm22 */
++#define R_IA64_TPREL64I 0x93 /* @tprel(sym + add), imm64 */
++#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym + add), data8 MSB */
++#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), imm22 */
++#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym + add), data8 MSB */
++#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym + add), data8 LSB */
++#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(sym + add)), imm22 */
++#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym + add), imm14 */
++#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym + add), imm22 */
++#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym + add), imm64 */
++#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym + add), data4 MSB */
++#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym + add), data4 LSB */
++#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym + add), data8 MSB */
++#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym + add), data8 LSB */
++#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
++
++/* SH specific declarations */
++
++/* SH relocs. */
++#define R_SH_NONE 0
++#define R_SH_DIR32 1
++#define R_SH_REL32 2
++#define R_SH_DIR8WPN 3
++#define R_SH_IND12W 4
++#define R_SH_DIR8WPL 5
++#define R_SH_DIR8WPZ 6
++#define R_SH_DIR8BP 7
++#define R_SH_DIR8W 8
++#define R_SH_DIR8L 9
++#define R_SH_SWITCH16 25
++#define R_SH_SWITCH32 26
++#define R_SH_USES 27
++#define R_SH_COUNT 28
++#define R_SH_ALIGN 29
++#define R_SH_CODE 30
++#define R_SH_DATA 31
++#define R_SH_LABEL 32
++#define R_SH_SWITCH8 33
++#define R_SH_GNU_VTINHERIT 34
++#define R_SH_GNU_VTENTRY 35
++#define R_SH_TLS_GD_32 144
++#define R_SH_TLS_LD_32 145
++#define R_SH_TLS_LDO_32 146
++#define R_SH_TLS_IE_32 147
++#define R_SH_TLS_LE_32 148
++#define R_SH_TLS_DTPMOD32 149
++#define R_SH_TLS_DTPOFF32 150
++#define R_SH_TLS_TPOFF32 151
++#define R_SH_GOT32 160
++#define R_SH_PLT32 161
++#define R_SH_COPY 162
++#define R_SH_GLOB_DAT 163
++#define R_SH_JMP_SLOT 164
++#define R_SH_RELATIVE 165
++#define R_SH_GOTOFF 166
++#define R_SH_GOTPC 167
++/* Keep this the last entry. */
++#define R_SH_NUM 256
++
++/* Additional s390 relocs */
++
++#define R_390_NONE 0 /* No reloc. */
++#define R_390_8 1 /* Direct 8 bit. */
++#define R_390_12 2 /* Direct 12 bit. */
++#define R_390_16 3 /* Direct 16 bit. */
++#define R_390_32 4 /* Direct 32 bit. */
++#define R_390_PC32 5 /* PC relative 32 bit. */
++#define R_390_GOT12 6 /* 12 bit GOT offset. */
++#define R_390_GOT32 7 /* 32 bit GOT offset. */
++#define R_390_PLT32 8 /* 32 bit PC relative PLT address. */
++#define R_390_COPY 9 /* Copy symbol at runtime. */
++#define R_390_GLOB_DAT 10 /* Create GOT entry. */
++#define R_390_JMP_SLOT 11 /* Create PLT entry. */
++#define R_390_RELATIVE 12 /* Adjust by program base. */
++#define R_390_GOTOFF32 13 /* 32 bit offset to GOT. */
++#define R_390_GOTPC 14 /* 32 bit PC relative offset to GOT. */
++#define R_390_GOT16 15 /* 16 bit GOT offset. */
++#define R_390_PC16 16 /* PC relative 16 bit. */
++#define R_390_PC16DBL 17 /* PC relative 16 bit shifted by 1. */
++#define R_390_PLT16DBL 18 /* 16 bit PC rel. PLT shifted by 1. */
++#define R_390_PC32DBL 19 /* PC relative 32 bit shifted by 1. */
++#define R_390_PLT32DBL 20 /* 32 bit PC rel. PLT shifted by 1. */
++#define R_390_GOTPCDBL 21 /* 32 bit PC rel. GOT shifted by 1. */
++#define R_390_64 22 /* Direct 64 bit. */
++#define R_390_PC64 23 /* PC relative 64 bit. */
++#define R_390_GOT64 24 /* 64 bit GOT offset. */
++#define R_390_PLT64 25 /* 64 bit PC relative PLT address. */
++#define R_390_GOTENT 26 /* 32 bit PC rel. to GOT entry >> 1. */
++#define R_390_GOTOFF16 27 /* 16 bit offset to GOT. */
++#define R_390_GOTOFF64 28 /* 64 bit offset to GOT. */
++#define R_390_GOTPLT12 29 /* 12 bit offset to jump slot. */
++#define R_390_GOTPLT16 30 /* 16 bit offset to jump slot. */
++#define R_390_GOTPLT32 31 /* 32 bit offset to jump slot. */
++#define R_390_GOTPLT64 32 /* 64 bit offset to jump slot. */
++#define R_390_GOTPLTENT 33 /* 32 bit rel. offset to jump slot. */
++#define R_390_PLTOFF16 34 /* 16 bit offset from GOT to PLT. */
++#define R_390_PLTOFF32 35 /* 32 bit offset from GOT to PLT. */
++#define R_390_PLTOFF64 36 /* 64 bit offset from GOT to PLT. */
++#define R_390_TLS_LOAD 37 /* Tag for load insn in TLS code. */
++#define R_390_TLS_GDCALL 38 /* Tag for function call in general
++ dynamic TLS code. */
++#define R_390_TLS_LDCALL 39 /* Tag for function call in local
++ dynamic TLS code. */
++#define R_390_TLS_GD32 40 /* Direct 32 bit for general dynamic
++ thread local data. */
++#define R_390_TLS_GD64 41 /* Direct 64 bit for general dynamic
++ thread local data. */
++#define R_390_TLS_GOTIE12 42 /* 12 bit GOT offset for static TLS
++ block offset. */
++#define R_390_TLS_GOTIE32 43 /* 32 bit GOT offset for static TLS
++ block offset. */
++#define R_390_TLS_GOTIE64 44 /* 64 bit GOT offset for static TLS
++ block offset. */
++#define R_390_TLS_LDM32 45 /* Direct 32 bit for local dynamic
++ thread local data in LE code. */
++#define R_390_TLS_LDM64 46 /* Direct 64 bit for local dynamic
++ thread local data in LE code. */
++#define R_390_TLS_IE32 47 /* 32 bit address of GOT entry for
++ negated static TLS block offset. */
++#define R_390_TLS_IE64 48 /* 64 bit address of GOT entry for
++ negated static TLS block offset. */
++#define R_390_TLS_IEENT 49 /* 32 bit rel. offset to GOT entry for
++ negated static TLS block offset. */
++#define R_390_TLS_LE32 50 /* 32 bit negated offset relative to
++ static TLS block. */
++#define R_390_TLS_LE64 51 /* 64 bit negated offset relative to
++ static TLS block. */
++#define R_390_TLS_LDO32 52 /* 32 bit offset relative to TLS
++ block. */
++#define R_390_TLS_LDO64 53 /* 64 bit offset relative to TLS
++ block. */
++#define R_390_TLS_DTPMOD 54 /* ID of module containing symbol. */
++#define R_390_TLS_DTPOFF 55 /* Offset in TLS block. */
++#define R_390_TLS_TPOFF 56 /* Negated offset in static TLS
++ block. */
++
++/* Keep this the last entry. */
++#define R_390_NUM 57
++
++/* CRIS relocations. */
++#define R_CRIS_NONE 0
++#define R_CRIS_8 1
++#define R_CRIS_16 2
++#define R_CRIS_32 3
++#define R_CRIS_8_PCREL 4
++#define R_CRIS_16_PCREL 5
++#define R_CRIS_32_PCREL 6
++#define R_CRIS_GNU_VTINHERIT 7
++#define R_CRIS_GNU_VTENTRY 8
++#define R_CRIS_COPY 9
++#define R_CRIS_GLOB_DAT 10
++#define R_CRIS_JUMP_SLOT 11
++#define R_CRIS_RELATIVE 12
++#define R_CRIS_16_GOT 13
++#define R_CRIS_32_GOT 14
++#define R_CRIS_16_GOTPLT 15
++#define R_CRIS_32_GOTPLT 16
++#define R_CRIS_32_GOTREL 17
++#define R_CRIS_32_PLT_GOTREL 18
++#define R_CRIS_32_PLT_PCREL 19
++
++#define R_CRIS_NUM 20
++
++/* AMD x86-64 relocations. */
++#define R_X86_64_NONE 0 /* No reloc */
++#define R_X86_64_64 1 /* Direct 64 bit */
++#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
++#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
++#define R_X86_64_PLT32 4 /* 32 bit PLT address */
++#define R_X86_64_COPY 5 /* Copy symbol at runtime */
++#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
++#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
++#define R_X86_64_RELATIVE 8 /* Adjust by program base */
++#define R_X86_64_GOTPCREL 9 /* 32 bit signed PC relative
++ offset to GOT */
++#define R_X86_64_32 10 /* Direct 32 bit zero extended */
++#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
++#define R_X86_64_16 12 /* Direct 16 bit zero extended */
++#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
++#define R_X86_64_8 14 /* Direct 8 bit sign extended */
++#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
++#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
++#define R_X86_64_DTPOFF64 17 /* Offset in module's TLS block */
++#define R_X86_64_TPOFF64 18 /* Offset in initial TLS block */
++#define R_X86_64_TLSGD 19 /* 32 bit signed PC relative offset
++ to two GOT entries for GD symbol */
++#define R_X86_64_TLSLD 20 /* 32 bit signed PC relative offset
++ to two GOT entries for LD symbol */
++#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
++#define R_X86_64_GOTTPOFF 22 /* 32 bit signed PC relative offset
++ to GOT entry for IE symbol */
++#define R_X86_64_TPOFF32 23 /* Offset in initial TLS block */
++
++#define R_X86_64_NUM 24
++
++__END_DECLS
++
++#endif /* elf.h */
+
+ #include "elfconfig.h"
+
+@@ -185,3 +2631,4 @@
+ void fatal(const char *fmt, ...);
+ void warn(const char *fmt, ...);
+ void merror(const char *fmt, ...);
++
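(Aside, for illustration only and not part of the patch: the EF_MIPS_ARCH mask defined above selects the architecture-level nibble of the ELF header's e_flags word. A host tool that has these definitions in scope could decode the level roughly as sketched below; the function name is hypothetical.)

/* Sketch: map e_flags to a MIPS ISA name using the EF_MIPS_ARCH_* values above. */
static const char *mips_arch_level(unsigned int e_flags)
{
	switch (e_flags & EF_MIPS_ARCH) {
	case EF_MIPS_ARCH_1:  return "mips1";
	case EF_MIPS_ARCH_2:  return "mips2";
	case EF_MIPS_ARCH_3:  return "mips3";
	case EF_MIPS_ARCH_4:  return "mips4";
	case EF_MIPS_ARCH_5:  return "mips5";
	case EF_MIPS_ARCH_32: return "mips32";
	case EF_MIPS_ARCH_64: return "mips64";
	default:              return "unknown";
	}
}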
+diff -Nur linux-3.11.5.orig/scripts/mod/sumversion.c linux-3.11.5/scripts/mod/sumversion.c
+--- linux-3.11.5.orig/scripts/mod/sumversion.c 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/mod/sumversion.c 2013-10-16 18:09:31.000000000 +0200
+@@ -1,4 +1,4 @@
+-#include <netinet/in.h>
++/* #include <netinet/in.h> */
+ #ifdef __sun__
+ #include <inttypes.h>
+ #else
+diff -Nur linux-3.11.5.orig/tools/include/tools/linux_types.h linux-3.11.5/tools/include/tools/linux_types.h
+--- linux-3.11.5.orig/tools/include/tools/linux_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.11.5/tools/include/tools/linux_types.h 2013-10-16 18:09:31.000000000 +0200
+@@ -0,0 +1,22 @@
++#ifndef __LINUX_TYPES_H
++#define __LINUX_TYPES_H
++
++#include <stdint.h>
++
++typedef uint8_t __u8;
++typedef uint8_t __be8;
++typedef uint8_t __le8;
++
++typedef uint16_t __u16;
++typedef uint16_t __be16;
++typedef uint16_t __le16;
++
++typedef uint32_t __u32;
++typedef uint32_t __be32;
++typedef uint32_t __le32;
++
++typedef uint64_t __u64;
++typedef uint64_t __be64;
++typedef uint64_t __le64;
++
++#endif
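(Aside, for illustration only: the new tools/include/tools/linux_types.h maps the kernel's fixed-width __u8/__u16/__u32/__u64 names onto <stdint.h> types so host tools can share structure layouts with kernel code. A hypothetical host program, built with -Itools/include so the header is found, might use it as below; the struct and its fields are made up for the example.)

/* Hypothetical host tool using the fixed-width typedefs from the new header. */
#include <stdio.h>
#include <tools/linux_types.h>

struct sample_hdr {
	__u8  version;
	__u8  flags;
	__u16 length;
	__u32 magic;
};

int main(void)
{
	struct sample_hdr h = { 1, 0, sizeof(h), 0x12345678 };

	printf("magic=%08x len=%u\n", (unsigned)h.magic, (unsigned)h.length);
	return 0;
}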
diff --git a/target/linux/patches/3.14.43/cleankernel.patch b/target/linux/patches/3.14.43/cleankernel.patch
new file mode 100644
index 000000000..d8c055dc3
--- /dev/null
+++ b/target/linux/patches/3.14.43/cleankernel.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.5.orig/scripts/Makefile.headersinst linux-3.11.5/scripts/Makefile.headersinst
+--- linux-3.11.5.orig/scripts/Makefile.headersinst 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/scripts/Makefile.headersinst 2013-10-15 16:33:10.000000000 +0200
+@@ -107,7 +107,6 @@
+
+ targets += $(install-file)
+ $(install-file): scripts/headers_install.sh $(input-files1) $(input-files2) $(input-files3) FORCE
+- $(if $(unwanted),$(call cmd,remove),)
+ $(if $(wildcard $(dir $@)),,$(shell mkdir -p $(dir $@)))
+ $(call if_changed,install)
+
diff --git a/target/linux/patches/3.14.43/defaults.patch b/target/linux/patches/3.14.43/defaults.patch
new file mode 100644
index 000000000..f071fd1dd
--- /dev/null
+++ b/target/linux/patches/3.14.43/defaults.patch
@@ -0,0 +1,34 @@
+diff -Nur linux-3.0.4.orig/fs/Kconfig linux-3.0.4/fs/Kconfig
+--- linux-3.0.4.orig/fs/Kconfig 2011-08-29 22:56:30.000000000 +0200
++++ linux-3.0.4/fs/Kconfig 2011-10-15 22:08:44.000000000 +0200
+@@ -47,7 +47,7 @@
+ def_bool n
+
+ config EXPORTFS
+- tristate
++ def_bool y
+
+ config FILE_LOCKING
+ bool "Enable POSIX file locking API" if EXPERT
+diff -Nur linux-3.0.4.orig/fs/notify/Kconfig linux-3.0.4/fs/notify/Kconfig
+--- linux-3.0.4.orig/fs/notify/Kconfig 2011-08-29 22:56:30.000000000 +0200
++++ linux-3.0.4/fs/notify/Kconfig 2011-10-15 22:02:00.000000000 +0200
+@@ -1,5 +1,5 @@
+ config FSNOTIFY
+- def_bool n
++ def_bool y
+
+ source "fs/notify/dnotify/Kconfig"
+ source "fs/notify/inotify/Kconfig"
+diff -Nur linux-3.11.10.orig/usr/Kconfig linux-3.11.10/usr/Kconfig
+--- linux-3.11.10.orig/usr/Kconfig 2013-11-29 19:42:37.000000000 +0100
++++ linux-3.11.10/usr/Kconfig 2013-12-27 19:15:16.000000000 +0100
+@@ -47,7 +47,7 @@
+
+ config RD_GZIP
+ bool "Support initial ramdisks compressed using gzip" if EXPERT
+- default y
++ default n
+ depends on BLK_DEV_INITRD
+ select DECOMPRESS_GZIP
+ help
diff --git a/target/linux/patches/3.14.43/disable-netfilter.patch b/target/linux/patches/3.14.43/disable-netfilter.patch
new file mode 100644
index 000000000..7b1ca013a
--- /dev/null
+++ b/target/linux/patches/3.14.43/disable-netfilter.patch
@@ -0,0 +1,160 @@
+diff -Nur linux-3.7.3.orig/net/Kconfig linux-3.7.3/net/Kconfig
+--- linux-3.7.3.orig/net/Kconfig 2013-01-17 17:47:40.000000000 +0100
++++ linux-3.7.3/net/Kconfig 2013-01-19 18:19:55.000000000 +0100
+@@ -163,7 +163,7 @@
+ config NETFILTER_ADVANCED
+ bool "Advanced netfilter configuration"
+ depends on NETFILTER
+- default y
++ default n
+ help
+ If you say Y here you can select between all the netfilter modules.
+ If you say N the more unusual ones will not be shown and the
+@@ -175,7 +175,7 @@
+ bool "Bridged IP/ARP packets filtering"
+ depends on BRIDGE && NETFILTER && INET
+ depends on NETFILTER_ADVANCED
+- default y
++ default n
+ ---help---
+ Enabling this option will let arptables resp. iptables see bridged
+ ARP resp. IP traffic. If you want a bridging firewall, you probably
+diff -Nur linux-3.7.3.orig/net/netfilter/Kconfig linux-3.7.3/net/netfilter/Kconfig
+--- linux-3.7.3.orig/net/netfilter/Kconfig 2013-01-17 17:47:40.000000000 +0100
++++ linux-3.7.3/net/netfilter/Kconfig 2013-01-19 18:21:41.000000000 +0100
+@@ -22,7 +22,6 @@
+
+ config NETFILTER_NETLINK_LOG
+ tristate "Netfilter LOG over NFNETLINK interface"
+- default m if NETFILTER_ADVANCED=n
+ select NETFILTER_NETLINK
+ help
+ If this option is enabled, the kernel will include support
+@@ -34,7 +33,6 @@
+
+ config NF_CONNTRACK
+ tristate "Netfilter connection tracking support"
+- default m if NETFILTER_ADVANCED=n
+ help
+ Connection tracking keeps a record of what packets have passed
+ through your machine, in order to figure out how they are related
+@@ -60,7 +58,6 @@
+ config NF_CONNTRACK_SECMARK
+ bool 'Connection tracking security mark support'
+ depends on NETWORK_SECMARK
+- default m if NETFILTER_ADVANCED=n
+ help
+ This option enables security markings to be applied to
+ connections. Typically they are copied to connections from
+@@ -177,7 +174,6 @@
+
+ config NF_CONNTRACK_FTP
+ tristate "FTP protocol support"
+- default m if NETFILTER_ADVANCED=n
+ help
+ Tracking FTP connections is problematic: special helpers are
+ required for tracking them, and doing masquerading and other forms
+@@ -211,7 +207,6 @@
+
+ config NF_CONNTRACK_IRC
+ tristate "IRC protocol support"
+- default m if NETFILTER_ADVANCED=n
+ help
+ There is a commonly-used extension to IRC called
+ Direct Client-to-Client Protocol (DCC). This enables users to send
+@@ -296,7 +291,6 @@
+
+ config NF_CONNTRACK_SIP
+ tristate "SIP protocol support"
+- default m if NETFILTER_ADVANCED=n
+ help
+ SIP is an application-layer control protocol that can establish,
+ modify, and terminate multimedia sessions (conferences) such as
+@@ -320,7 +314,6 @@
+ config NF_CT_NETLINK
+ tristate 'Connection tracking netlink interface'
+ select NETFILTER_NETLINK
+- default m if NETFILTER_ADVANCED=n
+ help
+ This option enables support for a netlink-based userspace interface
+
+@@ -424,7 +417,6 @@
+
+ config NETFILTER_XTABLES
+ tristate "Netfilter Xtables support (required for ip_tables)"
+- default m if NETFILTER_ADVANCED=n
+ help
+ This is required if you intend to use any of ip_tables,
+ ip6_tables or arp_tables.
+@@ -435,7 +427,6 @@
+
+ config NETFILTER_XT_MARK
+ tristate 'nfmark target and match support'
+- default m if NETFILTER_ADVANCED=n
+ ---help---
+ This option adds the "MARK" target and "mark" match.
+
+@@ -527,7 +518,6 @@
+ config NETFILTER_XT_TARGET_CONNSECMARK
+ tristate '"CONNSECMARK" target support'
+ depends on NF_CONNTRACK && NF_CONNTRACK_SECMARK
+- default m if NETFILTER_ADVANCED=n
+ help
+ The CONNSECMARK target copies security markings from packets
+ to connections, and restores security markings from connections
+@@ -632,7 +622,6 @@
+
+ config NETFILTER_XT_TARGET_LOG
+ tristate "LOG target support"
+- default m if NETFILTER_ADVANCED=n
+ help
+ This option adds a `LOG' target, which allows you to create rules in
+ any iptables table which records the packet header to the syslog.
+@@ -660,7 +649,6 @@
+
+ config NETFILTER_XT_TARGET_NFLOG
+ tristate '"NFLOG" target support'
+- default m if NETFILTER_ADVANCED=n
+ select NETFILTER_NETLINK_LOG
+ help
+ This option enables the NFLOG target, which allows to LOG
+@@ -741,7 +729,6 @@
+ config NETFILTER_XT_TARGET_SECMARK
+ tristate '"SECMARK" target support'
+ depends on NETWORK_SECMARK
+- default m if NETFILTER_ADVANCED=n
+ help
+ The SECMARK target allows security marking of network
+ packets, for use with security subsystems.
+@@ -751,7 +738,6 @@
+ config NETFILTER_XT_TARGET_TCPMSS
+ tristate '"TCPMSS" target support'
+ depends on (IPV6 || IPV6=n)
+- default m if NETFILTER_ADVANCED=n
+ ---help---
+ This option adds a `TCPMSS' target, which allows you to alter the
+ MSS value of TCP SYN packets, to control the maximum size for that
+@@ -856,7 +842,6 @@
+ config NETFILTER_XT_MATCH_CONNTRACK
+ tristate '"conntrack" connection tracking match support'
+ depends on NF_CONNTRACK
+- default m if NETFILTER_ADVANCED=n
+ help
+ This is a general conntrack match module, a superset of the state match.
+
+@@ -1063,7 +1048,6 @@
+ config NETFILTER_XT_MATCH_POLICY
+ tristate 'IPsec "policy" match support'
+ depends on XFRM
+- default m if NETFILTER_ADVANCED=n
+ help
+ Policy matching allows you to match packets based on the
+ IPsec policy that was used during decapsulation/will
+@@ -1170,7 +1154,6 @@
+ config NETFILTER_XT_MATCH_STATE
+ tristate '"state" match support'
+ depends on NF_CONNTRACK
+- default m if NETFILTER_ADVANCED=n
+ help
+ Connection state matching allows you to match packets based on their
+ relationship to a tracked connection (ie. previous packets). This
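The hunks above turn NETFILTER_ADVANCED and bridged packet filtering off by default and drop every "default m if NETFILTER_ADVANCED=n" line, so conntrack, xtables and the common targets and matches are no longer auto-selected as modules; they have to be enabled explicitly in the target kernel config. A minimal sketch of a userspace probe for such kernels follows; the program and the sysctl path it checks are illustrative assumptions, not part of the patch.

/* Hypothetical probe (assumption, not part of the patch): detect at runtime
 * whether nf_conntrack is present, since the defaults above no longer pull it
 * in. The sysctl node below exists only when nf_conntrack is built and loaded. */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/proc/sys/net/netfilter/nf_conntrack_max", &st) == 0)
		puts("nf_conntrack is available");
	else
		puts("nf_conntrack is not built in or not loaded");
	return 0;
}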
diff --git a/target/linux/patches/3.14.43/export-symbol-for-exmap.patch b/target/linux/patches/3.14.43/export-symbol-for-exmap.patch
new file mode 100644
index 000000000..4f0fc8449
--- /dev/null
+++ b/target/linux/patches/3.14.43/export-symbol-for-exmap.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.5.orig/kernel/pid.c linux-3.11.5/kernel/pid.c
+--- linux-3.11.5.orig/kernel/pid.c 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/kernel/pid.c 2013-10-29 15:37:02.000000000 +0100
+@@ -450,6 +450,7 @@
+ {
+ return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
+ }
++EXPORT_SYMBOL(find_task_by_vpid);
+
+ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
+ {
diff --git a/target/linux/patches/3.14.43/fblogo.patch b/target/linux/patches/3.14.43/fblogo.patch
new file mode 100644
index 000000000..cbbb4216f
--- /dev/null
+++ b/target/linux/patches/3.14.43/fblogo.patch
@@ -0,0 +1,2097 @@
+diff -Nur linux-3.13.7.orig/Documentation/fb/00-INDEX linux-3.13.7/Documentation/fb/00-INDEX
+--- linux-3.13.7.orig/Documentation/fb/00-INDEX 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/Documentation/fb/00-INDEX 2014-03-29 16:03:17.000000000 +0100
+@@ -21,6 +21,8 @@
+ - info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ - intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++ - info on the Framebuffer Console Decoration
+ framebuffer.txt
+ - introduction to frame buffer devices.
+ gxfb.txt
+diff -Nur linux-3.13.7.orig/Documentation/fb/fbcondecor.txt linux-3.13.7/Documentation/fb/fbcondecor.txt
+--- linux-3.13.7.orig/Documentation/fb/fbcondecor.txt 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.7/Documentation/fb/fbcondecor.txt 2014-03-29 16:03:17.000000000 +0100
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++ http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set the path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++ standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem
++ is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->kernel.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes:
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in a 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ignored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++ values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When an FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc:
++Virtual console number.
++
++origin:
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data:
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++ Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++ Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++ Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++ Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff -Nur linux-3.13.7.orig/drivers/Makefile linux-3.13.7/drivers/Makefile
+--- linux-3.13.7.orig/drivers/Makefile 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/Makefile 2014-03-29 16:03:17.000000000 +0100
+@@ -17,6 +17,10 @@
+ obj-$(CONFIG_PCI) += pci/
+ obj-$(CONFIG_PARISC) += parisc/
+ obj-$(CONFIG_RAPIDIO) += rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y += tty/
++obj-y += char/
+ obj-y += video/
+ obj-y += idle/
+
+@@ -42,11 +46,6 @@
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER) += reset/
+
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y += tty/
+-obj-y += char/
+-
+ # gpu/ comes after char for AGP vs DRM startup
+ obj-y += gpu/
+
+diff -Nur linux-3.13.7.orig/drivers/video/Kconfig linux-3.13.7/drivers/video/Kconfig
+--- linux-3.13.7.orig/drivers/video/Kconfig 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/Kconfig 2014-03-29 16:03:17.000000000 +0100
+@@ -1231,7 +1231,6 @@
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+- select FB_TILEBLITTING
+ select FB_MACMODES if PPC_PMAC
+ ---help---
+ Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff -Nur linux-3.13.7.orig/drivers/video/console/Kconfig linux-3.13.7/drivers/video/console/Kconfig
+--- linux-3.13.7.orig/drivers/video/console/Kconfig 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/console/Kconfig 2014-03-29 16:03:17.000000000 +0100
+@@ -125,6 +125,19 @@
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config FB_CON_DECOR
++ bool "Support for the Framebuffer Console Decorations"
++ depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++ default n
++ ---help---
++ This option enables support for framebuffer console decorations which
++ makes it possible to display images in the background of the system
++ consoles. Note that userspace utilities are necessary in order to take
++ advantage of these features. Refer to Documentation/fb/fbcondecor.txt
++ for more information.
++
++ If unsure, say N.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+diff -Nur linux-3.13.7.orig/drivers/video/console/Makefile linux-3.13.7/drivers/video/console/Makefile
+--- linux-3.13.7.orig/drivers/video/console/Makefile 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/console/Makefile 2014-03-29 16:03:17.000000000 +0100
+@@ -16,4 +16,5 @@
+ fbcon_ccw.o
+ endif
+
++obj-$(CONFIG_FB_CON_DECOR) += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI) += sticore.o
+diff -Nur linux-3.13.7.orig/drivers/video/console/bitblit.c linux-3.13.7/drivers/video/console/bitblit.c
+--- linux-3.13.7.orig/drivers/video/console/bitblit.c 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/console/bitblit.c 2014-03-29 16:03:17.000000000 +0100
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ /*
+ * Accelerated handlers.
+@@ -55,6 +56,13 @@
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
++ if (fbcon_decor_active(info, vc)) {
++ area.sx += vc->vc_decor.tx;
++ area.sy += vc->vc_decor.ty;
++ area.dx += vc->vc_decor.tx;
++ area.dy += vc->vc_decor.ty;
++ }
++
+ info->fbops->fb_copyarea(info, &area);
+ }
+
+@@ -380,11 +388,15 @@
+ cursor.image.depth = 1;
+ cursor.rop = ROP_XOR;
+
+- if (info->fbops->fb_cursor)
+- err = info->fbops->fb_cursor(info, &cursor);
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_cursor(info, &cursor);
++ } else {
++ if (info->fbops->fb_cursor)
++ err = info->fbops->fb_cursor(info, &cursor);
+
+- if (err)
+- soft_cursor(info, &cursor);
++ if (err)
++ soft_cursor(info, &cursor);
++ }
+
+ ops->cursor_reset = 0;
+ }
+diff -Nur linux-3.13.7.orig/drivers/video/console/cfbcondecor.c linux-3.13.7/drivers/video/console/cfbcondecor.c
+--- linux-3.13.7.orig/drivers/video/console/cfbcondecor.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.7/drivers/video/console/cfbcondecor.c 2014-03-29 16:03:17.000000000 +0100
+@@ -0,0 +1,471 @@
++/*
++ * linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ * Code based upon "Bootdecor" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type) \
++ do { \
++ if (d & (0x80 >> (shift))) \
++ dd2[(shift)] = fgx; \
++ else \
++ dd2[(shift)] = transparent ? *(type *)decor_src : bgx; \
++ decor_src += (bpp); \
++ } while (0) \
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++ u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++ int i, j, k;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ u32 col;
++
++ for (j = i = 0; i < 16; i++) {
++ k = color_table[i];
++
++ col = ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.red.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.green.offset);
++ col |= ((vc->vc_palette[j++] >> (8-minlen))
++ << info->var.blue.offset);
++ ((u32 *)info->pseudo_palette)[k] = col;
++ }
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++ int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++ unsigned int x, y;
++ u32 dd;
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++ unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++ u16 dd2[4];
++
++ u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++ u8* dst = (u8 *)(info->screen_base + d);
++
++ if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++ return;
++
++ for (y = 0; y < height; y++) {
++ switch (info->var.bits_per_pixel) {
++
++ case 32:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ *(u32 *)decor_src : bgx;
++
++ d <<= 1;
++ decor_src += 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++ case 24:
++ for (x = 0; x < width; x++) {
++
++ if ((x & 7) == 0)
++ d = *src++;
++ if (d & 0x80)
++ dd = fgx;
++ else
++ dd = transparent ?
++ (*(u32 *)decor_src & 0xffffff) : bgx;
++
++ d <<= 1;
++ decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++ fb_writew(dd & 0xffff, dst);
++ dst += 2;
++ fb_writeb((dd >> 16), dst);
++#else
++ fb_writew(dd >> 8, dst);
++ dst += 2;
++ fb_writeb(dd & 0xff, dst);
++#endif
++ dst++;
++ }
++ break;
++ case 16:
++ for (x = 0; x < width; x += 2) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 2, u16);
++ parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 16);
++#else
++ dd = dd2[1] | (dd2[0] << 16);
++#endif
++ d <<= 2;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ break;
++
++ case 8:
++ for (x = 0; x < width; x += 4) {
++ if ((x & 7) == 0)
++ d = *src++;
++
++ parse_pixel(0, 1, u8);
++ parse_pixel(1, 1, u8);
++ parse_pixel(2, 1, u8);
++ parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++ dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++ dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++ d <<= 4;
++ fb_writel(dd, dst);
++ dst += 4;
++ }
++ }
++
++ dst += info->fix.line_length - width * bytespp;
++ decor_src += (info->var.xres - width) * bytespp;
++ }
++}
++
++#define cc2cx(a) \
++ ((info->fix.visual == FB_VISUAL_TRUECOLOR || \
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? \
++ ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++ const unsigned short *s, int count, int yy, int xx)
++{
++ unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++ struct fbcon_ops *ops = info->fbcon_par;
++ int fg_color, bg_color, transparent;
++ u8 *src;
++ u32 bgx, fgx;
++ u16 c = scr_readw(s);
++
++ fg_color = get_color(vc, info, c, 1);
++ bg_color = get_color(vc, info, c, 0);
++
++ /* Don't paint the background image if console is blanked */
++ transparent = ops->blank_state ? 0 :
++ (vc->vc_decor.bg_color == bg_color);
++
++ xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++ yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++ fgx = cc2cx(fg_color);
++ bgx = cc2cx(bg_color);
++
++ while (count--) {
++ c = scr_readw(s++);
++ src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++ ((vc->vc_font.width + 7) >> 3);
++
++ fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++ vc->vc_font.width, src, fgx, bgx, transparent);
++ xx += vc->vc_font.width;
++ }
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++ int i;
++ unsigned int dsize, s_pitch;
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct vc_data* vc;
++ u8 *src;
++
++ /* we really don't need any cursors while the console is blanked */
++ if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++ return;
++
++ vc = vc_cons[ops->currcon].d;
++
++ src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++ if (!src)
++ return;
++
++ s_pitch = (cursor->image.width + 7) >> 3;
++ dsize = s_pitch * cursor->image.height;
++ if (cursor->enable) {
++ switch (cursor->rop) {
++ case ROP_XOR:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] ^ cursor->mask[i];
++ break;
++ case ROP_COPY:
++ default:
++ for (i = 0; i < dsize; i++)
++ src[i] = cursor->image.data[i] & cursor->mask[i];
++ break;
++ }
++ } else
++ memcpy(src, cursor->image.data, dsize);
++
++ fbcon_decor_renderc(info,
++ cursor->image.dy + vc->vc_decor.ty,
++ cursor->image.dx + vc->vc_decor.tx,
++ cursor->image.height,
++ cursor->image.width,
++ (u8*)src,
++ cc2cx(cursor->image.fg_color),
++ cc2cx(cursor->image.bg_color),
++ cursor->image.bg_color == vc->vc_decor.bg_color);
++
++ kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++ u32 bgx, int bpp)
++{
++ int i;
++
++ if (bpp == 8)
++ bgx |= bgx << 8;
++ if (bpp == 16 || bpp == 8)
++ bgx |= bgx << 16;
++
++ while (height-- > 0) {
++ u8 *p = dst;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++) {
++ fb_writel(bgx, p); p += 4;
++ }
++ break;
++ case 24:
++ for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++ fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++ fb_writeb((bgx >> 16),p++);
++#else
++ fb_writew((bgx >> 8),(u16*)p); p += 2;
++ fb_writeb((bgx & 0xff),p++);
++#endif
++ }
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 2) {
++ fb_writel(bgx,p); p += 4;
++ }
++ if (width & 1)
++ fb_writew(bgx,(u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++) {
++ fb_writel(bgx,p); p += 4;
++ }
++
++ if (width & 2) {
++ fb_writew(bgx,p); p += 2;
++ }
++ if (width & 1)
++ fb_writeb(bgx,(u8*)p);
++ break;
++
++ }
++ dst += dstbytes;
++ }
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++ int srclinebytes, int bpp)
++{
++ int i;
++
++ while (height-- > 0) {
++ u32 *p = (u32 *)dst;
++ u32 *q = (u32 *)src;
++
++ switch (bpp) {
++
++ case 32:
++ for (i=0; i < width; i++)
++ fb_writel(*q++, p++);
++ break;
++ case 24:
++ for (i=0; i < (width*3/4); i++)
++ fb_writel(*q++, p++);
++ if ((width*3) % 4) {
++ if (width & 2) {
++ fb_writeb(*(u8*)q, (u8*)p);
++ } else if (width & 1) {
++ fb_writew(*(u16*)q, (u16*)p);
++ fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++ }
++ }
++ break;
++ case 16:
++ for (i=0; i < width/4; i++) {
++ fb_writel(*q++, p++);
++ fb_writel(*q++, p++);
++ }
++ if (width & 2)
++ fb_writel(*q++, p++);
++ if (width & 1)
++ fb_writew(*(u16*)q, (u16*)p);
++ break;
++ case 8:
++ for (i=0; i < width/4; i++)
++ fb_writel(*q++, p++);
++
++ if (width & 2) {
++ fb_writew(*(u16*)q, (u16*)p);
++ q = (u32*) ((u16*)q + 1);
++ p = (u32*) ((u16*)p + 1);
++ }
++ if (width & 1)
++ fb_writeb(*(u8*)q, (u8*)p);
++ break;
++ }
++
++ dst += linebytes;
++ src += srclinebytes;
++ }
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++ int width)
++{
++ int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++ int d = sy * info->fix.line_length + sx * bytespp;
++ int ds = (sy * info->var.xres + sx) * bytespp;
++
++ fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++ height, width, info->fix.line_length, info->var.xres * bytespp,
++ info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++ int height, int width)
++{
++ int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++ struct fbcon_ops *ops = info->fbcon_par;
++ u8 *dst;
++ int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++ transparent = (vc->vc_decor.bg_color == bg_color);
++ sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++ sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++ height *= vc->vc_font.height;
++ width *= vc->vc_font.width;
++
++ /* Don't paint the background image if console is blanked */
++ if (transparent && !ops->blank_state) {
++ decorfill(info, sy, sx, height, width);
++ } else {
++ dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++ sx * ((info->var.bits_per_pixel + 7) >> 3));
++ decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++ info->var.bits_per_pixel);
++ }
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++ int bottom_only)
++{
++ unsigned int tw = vc->vc_cols*vc->vc_font.width;
++ unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++ if (!bottom_only) {
++ /* top margin */
++ decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++ /* left margin */
++ decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++ /* right margin */
++ decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
++ info->var.xres - vc->vc_decor.tx - tw);
++ }
++ decorfill(info, vc->vc_decor.ty + th, 0,
++ info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
++ int sx, int dx, int width)
++{
++ u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++ u16 *s = d + (dx - sx);
++ u16 *start = d;
++ u16 *ls = d;
++ u16 *le = d + width;
++ u16 c;
++ int x = dx;
++ u16 attr = 1;
++
++ do {
++ c = scr_readw(d);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start;
++ start = d;
++ }
++ }
++ if (s >= ls && s < le && c == scr_readw(s)) {
++ if (d > start) {
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++ x += d - start + 1;
++ start = d + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (d < le);
++ if (d > start)
++ fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++ if (blank) {
++ decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++ info->fix.line_length, 0, info->var.bits_per_pixel);
++ } else {
++ update_screen(vc);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++}
++
+diff -Nur linux-3.13.7.orig/drivers/video/console/fbcon.c linux-3.13.7/drivers/video/console/fbcon.c
+--- linux-3.13.7.orig/drivers/video/console/fbcon.c 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/console/fbcon.c 2014-03-29 16:03:17.000000000 +0100
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+
+ #include "fbcon.h"
++#include "fbcondecor.h"
+
+ #ifdef FBCONDEBUG
+ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@
+
+ static struct display fb_display[MAX_NR_CONSOLES];
+
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+
+ static int logo_lines;
+@@ -286,7 +287,7 @@
+ !vt_force_oops_output(vc);
+ }
+
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ u16 c, int is_fg)
+ {
+ int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@
+ info_idx = -1;
+ } else {
+ fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++ fbcon_decor_init();
++#endif
+ }
+
+ return err;
+@@ -1007,6 +1011,12 @@
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
++
++ if (fbcon_decor_active(info, vc)) {
++ cols = vc->vc_decor.twidth / vc->vc_font.width;
++ rows = vc->vc_decor.theight / vc->vc_font.height;
++ }
++
+ vc_resize(vc, cols, rows);
+
+ DPRINTK("mode: %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@
+ cap = info->flags;
+
+ if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+- (info->fix.type == FB_TYPE_TEXT))
++ (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ logo = 0;
+
+ if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@
+ fbcon_clear_margins(vc, 0);
+ }
+
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear(vc, info, sy, sx, height, width);
++ return;
++ }
++
+ /* Split blits that cross physical y_wrap boundary */
+
+ y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@
+ struct display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+- get_color(vc, info, scr_readw(s), 1),
+- get_color(vc, info, scr_readw(s), 0));
++ if (!fbcon_is_inactive(vc, info)) {
++
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++ else
++ ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++ get_color(vc, info, scr_readw(s), 1),
++ get_color(vc, info, scr_readw(s), 0));
++ }
+ }
+
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1298,8 +1318,13 @@
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+- if (!fbcon_is_inactive(vc, info))
+- ops->clear_margins(vc, info, bottom_only);
++ if (!fbcon_is_inactive(vc, info)) {
++ if (fbcon_decor_active(info, vc)) {
++ fbcon_decor_clear_margins(vc, info, bottom_only);
++ } else {
++ ops->clear_margins(vc, info, bottom_only);
++ }
++ }
+ }
+
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1844,7 @@
+ count = vc->vc_rows;
+ if (softback_top)
+ fbcon_softback_note(vc, t, count);
+- if (logo_shown >= 0)
++ if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ goto redraw_up;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+@@ -1912,6 +1937,8 @@
+ count = vc->vc_rows;
+ if (logo_shown >= 0)
+ goto redraw_down;
++ if (fbcon_decor_active(info, vc))
++ goto redraw_down;
+ switch (p->scrollmode) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2087,13 @@
+ }
+ return;
+ }
++
++ if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ /* must use slower redraw bmove to keep background pic intact */
++ fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ return;
++ }
++
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+ }
+@@ -2130,8 +2164,8 @@
+ var.yres = virt_h * virt_fh;
+ x_diff = info->var.xres - var.xres;
+ y_diff = info->var.yres - var.yres;
+- if (x_diff < 0 || x_diff > virt_fw ||
+- y_diff < 0 || y_diff > virt_fh) {
++ if ((x_diff < 0 || x_diff > virt_fw ||
++ y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ const struct fb_videomode *mode;
+
+ DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2201,21 @@
+
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
++ prev_console = ops->currcon;
++ if (prev_console != -1)
++ old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++ if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++ if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++ /* Clear the screen to avoid displaying funky colors during
++ * palette updates. */
++ memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++ 0, info->var.yres * info->fix.line_length);
++ }
++ }
++#endif
+
+ if (softback_top) {
+ if (softback_lines)
+@@ -2185,9 +2234,6 @@
+ logo_shown = FBCON_LOGO_CANSHOW;
+ }
+
+- prev_console = ops->currcon;
+- if (prev_console != -1)
+- old_info = registered_fb[con2fb_map[prev_console]];
+ /*
+ * FIXME: If we have multiple fbdev's loaded, we need to
+ * update all info->currcon. Perhaps, we can place this
+@@ -2231,6 +2277,18 @@
+ fbcon_del_cursor_timer(old_info);
+ }
+
++ if (fbcon_decor_active_vc(vc)) {
++ struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++ if (!vc_curr->vc_decor.theme ||
++ strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++ (fbcon_decor_active_nores(info, vc_curr) &&
++ !fbcon_decor_active(info, vc_curr))) {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++ }
++
+ if (fbcon_is_inactive(vc, info) ||
+ ops->blank_state != FB_BLANK_UNBLANK)
+ fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@
+ }
+ }
+
+- if (!fbcon_is_inactive(vc, info)) {
++ if (!fbcon_is_inactive(vc, info)) {
+ if (ops->blank_state != blank) {
+ ops->blank_state = blank;
+ fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ ops->cursor_flash = (!blank);
+
+- if (!(info->flags & FBINFO_MISC_USEREVENT))
+- if (fb_blank(info, blank))
+- fbcon_generic_blank(vc, info, blank);
++ if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++ if (fb_blank(info, blank)) {
++ if (fbcon_decor_active(info, vc))
++ fbcon_decor_blank(vc, info, blank);
++ else
++ fbcon_generic_blank(vc, info, blank);
++ }
++ }
+ }
+
+ if (!blank)
+@@ -2522,13 +2585,22 @@
+ }
+
+ if (resize) {
++ /* reset wrap/pan */
+ int cols, rows;
+
+ cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++ if (fbcon_decor_active(info, vc)) {
++ info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++ cols = vc->vc_decor.twidth;
++ rows = vc->vc_decor.theight;
++ }
+ cols /= w;
+ rows /= h;
++
+ vc_resize(vc, cols, rows);
++
+ if (CON_IS_VISIBLE(vc) && softback_buf)
+ fbcon_update_softback(vc);
+ } else if (CON_IS_VISIBLE(vc)
+@@ -2657,7 +2729,11 @@
+ int i, j, k, depth;
+ u8 val;
+
+- if (fbcon_is_inactive(vc, info))
++ if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++ || vc->vc_num != fg_console
++#endif
++ )
+ return -EINVAL;
+
+ if (!CON_IS_VISIBLE(vc))
+@@ -2683,14 +2759,56 @@
+ } else
+ fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+
+- return fb_set_cmap(&palette_cmap, info);
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++ u16 *red, *green, *blue;
++ int minlen = min(min(info->var.red.length, info->var.green.length),
++ info->var.blue.length);
++ int h;
++
++ struct fb_cmap cmap = {
++ .start = 0,
++ .len = (1 << minlen),
++ .red = NULL,
++ .green = NULL,
++ .blue = NULL,
++ .transp = NULL
++ };
++
++ red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++ if (!red)
++ goto out;
++
++ green = red + 256;
++ blue = green + 256;
++ cmap.red = red;
++ cmap.green = green;
++ cmap.blue = blue;
++
++ for (i = 0; i < cmap.len; i++) {
++ red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++ }
++
++ h = fb_set_cmap(&cmap, info);
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ kfree(red);
++
++ return h;
++
++ } else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++ fb_set_cmap(&info->bgdecor.cmap, info);
++
++out: return fb_set_cmap(&palette_cmap, info);
+ }
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ unsigned long p;
+ int line;
+-
++
+ if (vc->vc_num != fg_console || !softback_lines)
+ return (u16 *) (vc->vc_origin + offset);
+ line = offset / vc->vc_size_row;
+@@ -2909,7 +3027,14 @@
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ } else {
++ fbcon_decor_disable(vc, 0);
++ fbcon_decor_call_helper("modechange", vc->vc_num);
++ }
++
+ updatescrollmode(p, info, vc);
+ scrollback_max = 0;
+ scrollback_current = 0;
+@@ -2954,7 +3079,9 @@
+ rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ cols /= vc->vc_font.width;
+ rows /= vc->vc_font.height;
+- vc_resize(vc, cols, rows);
++ if (!fbcon_decor_active_nores(info, vc)) {
++ vc_resize(vc, cols, rows);
++ }
+ }
+
+ if (fg != -1)
+@@ -3570,6 +3697,7 @@
+ }
+ }
+
++ fbcon_decor_exit();
+ fbcon_has_exited = 1;
+ }
+
+diff -Nur linux-3.13.7.orig/drivers/video/console/fbcondecor.c linux-3.13.7/drivers/video/console/fbcondecor.c
+--- linux-3.13.7.orig/drivers/video/console/fbcondecor.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.7/drivers/video/console/fbcondecor.c 2014-03-29 16:03:17.000000000 +0100
+@@ -0,0 +1,555 @@
++/*
++ * linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ * Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ * Code based upon "Bootsplash" (C) 2001-2003
++ * Volker Poplawski <volker@poplawski.de>,
++ * Stefan Reinauer <stepan@suse.de>,
++ * Steffen Winterfeldt <snwint@suse.de>,
++ * Michael Schroeder <mls@suse.de>,
++ * Ken Wimer <wimer@suse.de>.
++ *
++ * Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++ char *envp[] = {
++ "HOME=/",
++ "PATH=/sbin:/bin",
++ NULL
++ };
++
++ char tfb[5];
++ char tcons[5];
++ unsigned char fb = (int) con2fb_map[vc];
++
++ char *argv[] = {
++ fbcon_decor_path,
++ "2",
++ cmd,
++ tcons,
++ tfb,
++ vc_cons[vc].d->vc_decor.theme,
++ NULL
++ };
++
++ snprintf(tfb,5,"%d",fb);
++ snprintf(tcons,5,"%d",vc);
++
++ return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++ struct fb_info* info;
++
++ if (!vc->vc_decor.state)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ vc->vc_decor.state = 0;
++ vc_resize(vc, info->var.xres / vc->vc_font.width,
++ info->var.yres / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num && redraw) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++ struct fb_info* info;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++ info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++ vc->vc_num == fg_console))
++ return -EINVAL;
++
++ vc->vc_decor.state = 1;
++ vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++ vc->vc_decor.theight / vc->vc_font.height);
++
++ if (fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++ printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++ vc->vc_num);
++
++ return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++ int ret;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++ if (!state)
++ ret = fbcon_decor_disable(vc, 1);
++ else
++ ret = fbcon_decor_enable(vc);
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++ *state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ char *tmp;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL || !cfg->twidth || !cfg->theight ||
++ cfg->tx + cfg->twidth > info->var.xres ||
++ cfg->ty + cfg->theight > info->var.yres)
++ return -EINVAL;
++
++ len = strlen_user(cfg->theme);
++ if (!len || len > FBCON_DECOR_THEME_LEN)
++ return -EINVAL;
++ tmp = kmalloc(len, GFP_KERNEL);
++ if (!tmp)
++ return -ENOMEM;
++ if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++ return -EFAULT;
++ cfg->theme = tmp;
++ cfg->state = 0;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held; we also don't need to disable decor because either the
++ * new config and background picture will be successfully loaded, and the
++ * decor will stay on, or in case of a failure it'll be turned off in fbcon. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER) {
++ console_lock();
++ if (vc->vc_decor.state)
++ fbcon_decor_disable(vc, 1);
++// }
++
++ if (vc->vc_decor.theme)
++ kfree(vc->vc_decor.theme);
++
++ vc->vc_decor = *cfg;
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++ vc->vc_num, vc->vc_decor.theme);
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++ char __user *tmp;
++
++ tmp = decor->theme;
++ *decor = vc->vc_decor;
++ decor->theme = tmp;
++
++ if (vc->vc_decor.theme) {
++ if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++ return -EFAULT;
++ } else
++ if (put_user(0, tmp))
++ return -EFAULT;
++
++ return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++ struct fb_info *info;
++ int len;
++ u8 *tmp;
++
++ if (vc->vc_num != fg_console)
++ return -EINVAL;
++
++ info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++ if (info == NULL)
++ return -EINVAL;
++
++ if (img->width != info->var.xres || img->height != info->var.yres) {
++ printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++ printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++ return -EINVAL;
++ }
++
++ if (img->depth != info->var.bits_per_pixel) {
++ printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++ return -EINVAL;
++ }
++
++ if (img->depth == 8) {
++ if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++ !img->cmap.blue)
++ return -EINVAL;
++
++ tmp = vmalloc(img->cmap.len * 3 * 2);
++ if (!tmp)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp,
++ (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 1),
++ (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++ copy_from_user(tmp + (img->cmap.len << 2),
++ (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++ vfree(tmp);
++ return -EFAULT;
++ }
++
++ img->cmap.transp = NULL;
++ img->cmap.red = (u16*)tmp;
++ img->cmap.green = img->cmap.red + img->cmap.len;
++ img->cmap.blue = img->cmap.green + img->cmap.len;
++ } else {
++ img->cmap.red = NULL;
++ }
++
++ len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++ /*
++ * Allocate an additional byte so that we never go outside of the
++ * buffer boundaries in the rendering functions in a 24 bpp mode.
++ */
++ tmp = vmalloc(len + 1);
++
++ if (!tmp)
++ goto out;
++
++ if (copy_from_user(tmp, (void __user*)img->data, len))
++ goto out;
++
++ img->data = tmp;
++
++ /* If this ioctl is a response to a request from kernel, the console sem
++ * is already held. */
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_lock();
++
++ if (info->bgdecor.data)
++ vfree((u8*)info->bgdecor.data);
++ if (info->bgdecor.cmap.red)
++ vfree(info->bgdecor.cmap.red);
++
++ info->bgdecor = *img;
++
++ if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++ redraw_screen(vc, 0);
++ update_region(vc, vc->vc_origin +
++ vc->vc_size_row * vc->vc_top,
++ vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++ fbcon_decor_clear_margins(vc, info, 0);
++ }
++
++// if (origin == FBCON_DECOR_IO_ORIG_USER)
++ console_unlock();
++
++ return 0;
++
++out: if (img->cmap.red)
++ vfree(img->cmap.red);
++
++ if (tmp)
++ vfree(tmp);
++ return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++ struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data, &wrapper->data);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC:
++ {
++ struct fb_image img;
++ if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++ case FBIOCONDECOR_SETCFG:
++ {
++ struct vc_decor cfg;
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++ case FBIOCONDECOR_GETCFG:
++ {
++ int rval;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++ return -EFAULT;
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++ return -EFAULT;
++ return rval;
++ }
++ case FBIOCONDECOR_SETSTATE:
++ {
++ unsigned int state = 0;
++ if (get_user(state, (unsigned int __user *)data))
++ return -EFAULT;
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++ case FBIOCONDECOR_GETSTATE:
++ {
++ unsigned int state = 0;
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ return put_user(state, (unsigned int __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
++
++ struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++ struct vc_data *vc = NULL;
++ unsigned short vc_num = 0;
++ unsigned char origin = 0;
++ compat_uptr_t data_compat = 0;
++ void __user *data = NULL;
++
++ if (!access_ok(VERIFY_READ, wrapper,
++ sizeof(struct fbcon_decor_iowrapper32)))
++ return -EFAULT;
++
++ __get_user(vc_num, &wrapper->vc);
++ __get_user(origin, &wrapper->origin);
++ __get_user(data_compat, &wrapper->data);
++ data = compat_ptr(data_compat);
++
++ if (!vc_cons_allocated(vc_num))
++ return -EINVAL;
++
++ vc = vc_cons[vc_num].d;
++
++ switch (cmd) {
++ case FBIOCONDECOR_SETPIC32:
++ {
++ struct fb_image32 img_compat;
++ struct fb_image img;
++
++ if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++ return -EFAULT;
++
++ fb_image_from_compat(img, img_compat);
++
++ return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++ }
++
++ case FBIOCONDECOR_SETCFG32:
++ {
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++
++ vc_decor_from_compat(cfg, cfg_compat);
++
++ return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++ }
++
++ case FBIOCONDECOR_GETCFG32:
++ {
++ int rval;
++ struct vc_decor32 cfg_compat;
++ struct vc_decor cfg;
++
++ if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ cfg.theme = compat_ptr(cfg_compat.theme);
++
++ rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++ vc_decor_to_compat(cfg_compat, cfg);
++
++ if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++ return -EFAULT;
++ return rval;
++ }
++
++ case FBIOCONDECOR_SETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ if (get_user(state_compat, (compat_uint_t __user *)data))
++ return -EFAULT;
++
++ state = (unsigned int)state_compat;
++
++ return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++ }
++
++ case FBIOCONDECOR_GETSTATE32:
++ {
++ compat_uint_t state_compat = 0;
++ unsigned int state = 0;
++
++ fbcon_decor_ioctl_dogetstate(vc, &state);
++ state_compat = (compat_uint_t)state;
++
++ return put_user(state_compat, (compat_uint_t __user *)data);
++ }
++
++ default:
++ return -ENOIOCTLCMD;
++ }
++}
++#else
++ #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = fbcon_decor_ioctl,
++ .compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "fbcondecor",
++ .fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset()
++{
++ int i;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ registered_fb[i]->bgdecor.data = NULL;
++ registered_fb[i]->bgdecor.cmap.red = NULL;
++ }
++
++ for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++ vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++ vc_cons[i].d->vc_decor.theight = 0;
++ vc_cons[i].d->vc_decor.theme = NULL;
++ }
++
++ return;
++}
++
++int fbcon_decor_init()
++{
++ int i;
++
++ fbcon_decor_reset();
++
++ if (initialized)
++ return 0;
++
++ i = misc_register(&fbcon_decor_dev);
++ if (i) {
++ printk(KERN_ERR "fbcondecor: failed to register device\n");
++ return i;
++ }
++
++ fbcon_decor_call_helper("init", 0);
++ initialized = 1;
++ return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++ fbcon_decor_reset();
++ return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
+diff -Nur linux-3.13.7.orig/drivers/video/console/fbcondecor.h linux-3.13.7/drivers/video/console/fbcondecor.h
+--- linux-3.13.7.orig/drivers/video/console/fbcondecor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.7/drivers/video/console/fbcondecor.h 2014-03-29 21:17:27.000000000 +0100
+@@ -0,0 +1,79 @@
++/*
++ * linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ * Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++void fbcon_decor_reset(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) && \
++ x->bgdecor.width == x->var.xres && \
++ x->bgdecor.height == x->var.yres && \
++ x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff -Nur linux-3.13.7.orig/drivers/video/fbcmap.c linux-3.13.7/drivers/video/fbcmap.c
+--- linux-3.13.7.orig/drivers/video/fbcmap.c 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/fbcmap.c 2014-03-29 16:03:17.000000000 +0100
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+
++#include "console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+ 0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@
+ if (transp)
+ htransp = *transp++;
+ if (info->fbops->fb_setcolreg(start++,
+- hred, hgreen, hblue,
++ hred, hgreen, hblue,
+ htransp, info))
+ break;
+ }
+ }
+- if (rc == 0)
++ if (rc == 0) {
+ fb_copy_cmap(cmap, &info->cmap);
+-
++ if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++ fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++ }
+ return rc;
+ }
+
+diff -Nur linux-3.13.7.orig/drivers/video/fbmem.c linux-3.13.7/drivers/video/fbmem.c
+--- linux-3.13.7.orig/drivers/video/fbmem.c 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/drivers/video/fbmem.c 2014-03-29 16:03:17.000000000 +0100
+@@ -1250,15 +1250,6 @@
+ u16 reserved[3];
+ };
+
+-struct fb_cmap32 {
+- u32 start;
+- u32 len;
+- compat_caddr_t red;
+- compat_caddr_t green;
+- compat_caddr_t blue;
+- compat_caddr_t transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+ {
+diff -Nur linux-3.13.7.orig/include/linux/console_decor.h linux-3.13.7/include/linux/console_decor.h
+--- linux-3.13.7.orig/include/linux/console_decor.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.7/include/linux/console_decor.h 2014-03-29 16:03:17.000000000 +0100
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++ __u8 bg_color; /* The color that is to be treated as transparent */
++ __u8 state; /* Current decor state: 0 = off, 1 = on */
++ __u16 tx, ty; /* Top left corner coordinates of the text field */
++ __u16 twidth, theight; /* Width and height of the text field */
++ compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++ (to).bg_color = (from).bg_color; \
++ (to).state = (from).state; \
++ (to).tx = (from).tx; \
++ (to).ty = (from).ty; \
++ (to).twidth = (from).twidth; \
++ (to).theight = (from).theight; \
++ (to).theme = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff -Nur linux-3.13.7.orig/include/linux/console_struct.h linux-3.13.7/include/linux/console_struct.h
+--- linux-3.13.7.orig/include/linux/console_struct.h 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/include/linux/console_struct.h 2014-03-29 16:03:17.000000000 +0100
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+
+ #define NPAR 16
++#include <linux/console_decor.h>
+
+ struct vc_data {
+ struct tty_port port; /* Upper level data */
+@@ -107,6 +108,8 @@
+ unsigned long vc_uni_pagedir;
+ unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++ struct vc_decor vc_decor;
+ /* additional information is in vt_kern.h */
+ };
+
+diff -Nur linux-3.13.7.orig/include/linux/fb.h linux-3.13.7/include/linux/fb.h
+--- linux-3.13.7.orig/include/linux/fb.h 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/include/linux/fb.h 2014-03-29 16:03:17.000000000 +0100
+@@ -219,6 +219,34 @@
+ };
+ #endif
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++ __u32 dx; /* Where to place image */
++ __u32 dy;
++ __u32 width; /* Size of image */
++ __u32 height;
++ __u32 fg_color; /* Only used when a mono bitmap */
++ __u32 bg_color;
++ __u8 depth; /* Depth of the image */
++ const compat_uptr_t data; /* Pointer to image data */
++ struct fb_cmap32 cmap; /* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++ (to).dx = (from).dx; \
++ (to).dy = (from).dy; \
++ (to).width = (from).width; \
++ (to).height = (from).height; \
++ (to).fg_color = (from).fg_color; \
++ (to).bg_color = (from).bg_color; \
++ (to).depth = (from).depth; \
++ (to).data = compat_ptr((from).data); \
++ fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+ * Frame buffer operations
+ *
+@@ -489,6 +517,9 @@
+ #define FBINFO_STATE_SUSPENDED 1
+ u32 state; /* Hardware state i.e suspend */
+ void *fbcon_par; /* fbcon use-only private area */
++
++ struct fb_image bgdecor;
++
+ /* From here on everything is device dependent */
+ void *par;
+ /* we need the PCI or similar aperture base/size not
+diff -Nur linux-3.13.7.orig/include/uapi/linux/fb.h linux-3.13.7/include/uapi/linux/fb.h
+--- linux-3.13.7.orig/include/uapi/linux/fb.h 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/include/uapi/linux/fb.h 2014-03-29 16:03:17.000000000 +0100
+@@ -8,6 +8,25 @@
+
+ #define FB_MAX 32 /* sufficient for now */
+
++struct fbcon_decor_iowrapper
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++ unsigned short vc; /* Virtual console */
++ unsigned char origin; /* Point of origin of the request */
++ compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+ 0x46 is 'F' */
+ #define FBIOGET_VSCREENINFO 0x4600
+@@ -35,6 +54,25 @@
+ #define FBIOGET_DISPINFO 0x4618
+ #define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32)
+
++#define FBIOCONDECOR_SETCFG _IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG _IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE _IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE _IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC _IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32 _IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32 _IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32 _IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32 _IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32 _IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN 128 /* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL 0 /* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER 1 /* User ioctl origin */
++
+ #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */
+ #define FB_TYPE_PLANES 1 /* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */
+@@ -277,6 +315,29 @@
+ __u32 reserved[4]; /* Reserved for future compatibility */
+ };
+
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++ __u32 start;
++ __u32 len; /* Number of entries */
++ compat_uptr_t red; /* Red values */
++ compat_uptr_t green;
++ compat_uptr_t blue;
++ compat_uptr_t transp; /* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++ (to).start = (from).start; \
++ (to).len = (from).len; \
++ (to).red = compat_ptr((from).red); \
++ (to).green = compat_ptr((from).green); \
++ (to).blue = compat_ptr((from).blue); \
++ (to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ __u32 start; /* First entry */
+ __u32 len; /* Number of entries */
+diff -Nur linux-3.13.7.orig/kernel/sysctl.c linux-3.13.7/kernel/sysctl.c
+--- linux-3.13.7.orig/kernel/sysctl.c 2014-03-24 05:45:42.000000000 +0100
++++ linux-3.13.7/kernel/sysctl.c 2014-03-29 16:03:17.000000000 +0100
+@@ -143,6 +143,10 @@
+ static int ngroups_max = NGROUPS_MAX;
+ static const int cap_last_cap = CAP_LAST_CAP;
+
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -252,6 +256,15 @@
+ .mode = 0555,
+ .child = dev_table,
+ },
++#ifdef CONFIG_FB_CON_DECOR
++ {
++ .procname = "fbcondecor",
++ .data = &fbcon_decor_path,
++ .maxlen = KMOD_PATH_LEN,
++ .mode = 0644,
++ .proc_handler = &proc_dostring,
++ },
++#endif
+ { }
+ };
+
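Note on the sysctl entry added above: it only exposes a path string, fbcon_decor_path, naming the userspace helper that the console-decoration code launches when a theme has to be loaded. The following is a rough, hedged sketch of that call pattern; sketch_call_decor_helper() and the argv layout are invented for illustration, while call_usermodehelper(), UMH_WAIT_PROC and fbcon_decor_path are existing kernel/patch symbols.

/* Hedged sketch only: shows how a configurable helper path such as
 * fbcon_decor_path is typically handed to the usermode-helper API.
 * The argv layout here is illustrative, not the exact fbcondecor one. */
#include <linux/kmod.h>
#include <linux/kernel.h>

extern char fbcon_decor_path[];

static int sketch_call_decor_helper(const char *cmd, unsigned short console)
{
	char con[16];
	char *argv[] = { fbcon_decor_path, (char *)cmd, con, NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

	snprintf(con, sizeof(con), "%u", console);
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}

With the sysctl in place, userspace can point the kernel at its helper by writing a path into the new "fbcondecor" entry under /proc/sys (proc_dostring stores it into fbcon_decor_path, capped at KMOD_PATH_LEN).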
diff --git a/target/linux/patches/3.14.43/gemalto.patch b/target/linux/patches/3.14.43/gemalto.patch
new file mode 100644
index 000000000..65f7af1d7
--- /dev/null
+++ b/target/linux/patches/3.14.43/gemalto.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-2.6.36.orig/drivers/tty/serial/8250/serial_cs.c linux-2.6.36/drivers/serial/8250/serial_cs.c
+--- linux-2.6.36.orig/drivers/tty/serial/8250/serial_cs.c 2010-10-20 22:30:22.000000000 +0200
++++ linux-2.6.36/drivers/tty/serial/8250/serial_cs.c 2010-12-13 23:03:40.000000000 +0100
+@@ -794,6 +794,7 @@
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0025),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0045),
+ PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0052),
++ PCMCIA_DEVICE_MANF_CARD(0x0157, 0x0100), /* Gemalto SCR */
+ PCMCIA_DEVICE_MANF_CARD(0x016c, 0x0006), /* Psion 56K+Fax */
+ PCMCIA_DEVICE_MANF_CARD(0x0200, 0x0001), /* MultiMobile */
+ PCMCIA_DEVICE_PROD_ID134("ADV", "TECH", "COMpad-32/85", 0x67459937, 0x916d02ba, 0x8fbe92ae),
diff --git a/target/linux/patches/3.14.43/initramfs-nosizelimit.patch b/target/linux/patches/3.14.43/initramfs-nosizelimit.patch
new file mode 100644
index 000000000..40d2f6bd8
--- /dev/null
+++ b/target/linux/patches/3.14.43/initramfs-nosizelimit.patch
@@ -0,0 +1,57 @@
+From 9a18df7a71bfa620b1278777d64783a359d7eb4e Mon Sep 17 00:00:00 2001
+From: Thorsten Glaser <tg@mirbsd.org>
+Date: Sun, 4 May 2014 01:37:54 +0200
+Subject: [PATCH] mount tmpfs-as-rootfs (initramfs) with -o
+ nr_blocks=0,nr_inodes=0
+
+I would have preferred to write this patch to be able to pass
+rootflags=nr_blocks=0,nr_inodes=0 on the kernel command line,
+and then hand these rootflags over to the initramfs (tmpfs)
+mount in the same way the kernel hands them over to the block
+device rootfs mount. But at least the Debian/m68k initrd also
+parses $rootflags from the environment and adds it to the call
+to the user-space mount for the eventual root device, which
+would make the kernel command line rootflags option be used in
+both places (tmpfs and e.g. ext4) which is guaranteed to error
+out in at least one of them.
+
+This change is intended to aid people in a setup where the
+initrd is the final root filesystem, i.e. not mounted over.
+This is especially useful in automated tests running on qemu
+for boards with constrained memory (e.g. 64 MiB on sh4).
+
+Considering that the initramfs is normally emptied out then
+overmounted, this change is probably safe for setups where
+initramfs just hosts early userspace, too, since the tmpfs
+backing it is not accessible any more later on, AFAICT.
+
+Signed-off-by: Thorsten Glaser <tg@mirbsd.org>
+---
+ init/do_mounts.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/init/do_mounts.c b/init/do_mounts.c
+index 82f2288..55a4cfe 100644
+--- a/init/do_mounts.c
++++ b/init/do_mounts.c
+@@ -594,6 +594,7 @@ out:
+ }
+
+ static bool is_tmpfs;
++static char tmpfs_rootflags[] = "nr_blocks=0,nr_inodes=0";
+ static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+ {
+@@ -606,6 +607,9 @@ static struct dentry *rootfs_mount(struct file_system_type *fs_type,
+ if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs)
+ fill = shmem_fill_super;
+
++ if (is_tmpfs)
++ data = tmpfs_rootflags;
++
+ return mount_nodev(fs_type, flags, data, fill);
+ }
+
+--
+2.0.0.rc0
+
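The hard-coded nr_blocks=0,nr_inodes=0 works because tmpfs treats a zero limit as "unlimited" (see Documentation/filesystems/tmpfs.txt), so the rootfs instance skips block and inode accounting entirely and can grow up to available RAM. The snippet below is only a simplified illustration of that zero-means-unlimited convention; blocks_within_limit() is an invented name, not the actual mm/shmem.c accounting code.

/* Simplified illustration of tmpfs's "a limit of 0 means no limit" rule;
 * a stand-in for demonstration, not the real shmem accounting. */
#include <stdbool.h>

bool blocks_within_limit(unsigned long max_blocks,
			 unsigned long used_blocks,
			 unsigned long want_blocks)
{
	if (max_blocks == 0)		/* nr_blocks=0: accounting disabled */
		return true;
	return used_blocks + want_blocks <= max_blocks;
}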
diff --git a/target/linux/patches/3.14.43/lemote-rfkill.patch b/target/linux/patches/3.14.43/lemote-rfkill.patch
new file mode 100644
index 000000000..a61488434
--- /dev/null
+++ b/target/linux/patches/3.14.43/lemote-rfkill.patch
@@ -0,0 +1,21 @@
+diff -Nur linux-3.3.orig/drivers/net/wireless/rtl818x/rtl8187/rfkill.c linux-3.3/drivers/net/wireless/rtl818x/rtl8187/rfkill.c
+--- linux-3.3.orig/drivers/net/wireless/rtl818x/rtl8187/rfkill.c 2012-03-19 00:15:34.000000000 +0100
++++ linux-3.3/drivers/net/wireless/rtl818x/rtl8187/rfkill.c 2012-03-27 23:29:46.000000000 +0200
+@@ -22,6 +22,9 @@
+
+ static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
+ {
++#ifdef CONFIG_LEMOTE_MACH2F
++ return 1;
++#else
+ u8 gpio;
+
+ gpio = rtl818x_ioread8(priv, &priv->map->GPIO0);
+@@ -29,6 +32,7 @@
+ gpio = rtl818x_ioread8(priv, &priv->map->GPIO1);
+
+ return gpio & priv->rfkill_mask;
++#endif
+ }
+
+ void rtl8187_rfkill_init(struct ieee80211_hw *hw)
diff --git a/target/linux/patches/3.14.43/microblaze-axi.patch b/target/linux/patches/3.14.43/microblaze-axi.patch
new file mode 100644
index 000000000..1a4b17d8c
--- /dev/null
+++ b/target/linux/patches/3.14.43/microblaze-axi.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.13.3.orig/drivers/net/ethernet/xilinx/xilinx_axienet_main.c linux-3.13.3/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+--- linux-3.13.3.orig/drivers/net/ethernet/xilinx/xilinx_axienet_main.c 2014-02-13 23:00:14.000000000 +0100
++++ linux-3.13.3/drivers/net/ethernet/xilinx/xilinx_axienet_main.c 2014-02-24 08:03:57.000000000 +0100
+@@ -25,6 +25,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
++#include <linux/of_irq.h>
+ #include <linux/of_mdio.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_address.h>
diff --git a/target/linux/patches/3.14.43/microblaze-ethernet.patch b/target/linux/patches/3.14.43/microblaze-ethernet.patch
new file mode 100644
index 000000000..742ab477e
--- /dev/null
+++ b/target/linux/patches/3.14.43/microblaze-ethernet.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.10.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c linux-3.11.10/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+--- linux-3.11.10.orig/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2013-11-29 19:42:37.000000000 +0100
++++ linux-3.11.10/drivers/net/ethernet/xilinx/xilinx_emaclite.c 2013-12-23 20:01:14.000000000 +0100
+@@ -1282,6 +1282,7 @@
+ { .compatible = "xlnx,opb-ethernetlite-1.01.b", },
+ { .compatible = "xlnx,xps-ethernetlite-1.00.a", },
+ { .compatible = "xlnx,xps-ethernetlite-2.00.a", },
++ { .compatible = "xlnx,xps-ethernetlite-2.00.b", },
+ { .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
+ { /* end of list */ },
diff --git a/target/linux/patches/3.14.43/mkpiggy.patch b/target/linux/patches/3.14.43/mkpiggy.patch
new file mode 100644
index 000000000..751678b74
--- /dev/null
+++ b/target/linux/patches/3.14.43/mkpiggy.patch
@@ -0,0 +1,28 @@
+diff -Nur linux-3.13.3.orig/arch/x86/boot/compressed/mkpiggy.c linux-3.13.3/arch/x86/boot/compressed/mkpiggy.c
+--- linux-3.13.3.orig/arch/x86/boot/compressed/mkpiggy.c 2014-02-13 23:00:14.000000000 +0100
++++ linux-3.13.3/arch/x86/boot/compressed/mkpiggy.c 2014-02-17 11:09:06.000000000 +0100
+@@ -29,7 +29,14 @@
+ #include <stdio.h>
+ #include <string.h>
+ #include <inttypes.h>
+-#include <tools/le_byteshift.h>
++
++static uint32_t getle32(const void *p)
++{
++ const uint8_t *cp = p;
++
++ return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
++ ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
++}
+
+ int main(int argc, char *argv[])
+ {
+@@ -63,7 +70,7 @@
+ }
+
+ ilen = ftell(f);
+- olen = get_unaligned_le32(&olen);
++ olen = getle32(&olen);
+
+ /*
+ * Now we have the input (compressed) and output (uncompressed)
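The getle32() helper added above reassembles a 32-bit little-endian value byte by byte, removing the dependency on the in-tree tools/le_byteshift.h header (presumably so mkpiggy also builds with non-GNU host toolchains). A self-contained usage sketch follows; the sample buffer is invented for illustration.

/* Stand-alone demonstration of the byte-wise little-endian decode used in
 * the patched mkpiggy.c; the sample buffer below is made up. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint32_t getle32(const void *p)
{
	const uint8_t *cp = p;

	return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
	       ((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
}

int main(void)
{
	const uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };

	printf("0x%08" PRIx32 "\n", getle32(buf));	/* prints 0x12345678 */
	return 0;
}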
diff --git a/target/linux/patches/3.14.43/mptcp.patch b/target/linux/patches/3.14.43/mptcp.patch
new file mode 100644
index 000000000..9784e0577
--- /dev/null
+++ b/target/linux/patches/3.14.43/mptcp.patch
@@ -0,0 +1,17203 @@
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index d286bde..34d56d7 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3162,7 +3162,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
+ */
+ memset(&tmp_opt, 0, sizeof(tmp_opt));
+ tcp_clear_options(&tmp_opt);
+- tcp_parse_options(skb, &tmp_opt, 0, NULL);
++ tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+
+ req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 2faef33..9c12362 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -309,12 +309,6 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
+ return NULL;
+ }
+
+-static inline struct inet6_request_sock *
+- inet6_rsk(const struct request_sock *rsk)
+-{
+- return NULL;
+-}
+-
+ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
+ {
+ return NULL;
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index 4ad0706..a230dd0 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -72,6 +72,53 @@ struct tcp_sack_block {
+ u32 end_seq;
+ };
+
++struct tcp_out_options {
++ u16 options; /* bit field of OPTION_* */
++ u8 ws; /* window scale, 0 to disable */
++ u8 num_sack_blocks;/* number of SACK blocks to include */
++ u8 hash_size; /* bytes in hash_location */
++ u16 mss; /* 0 to disable */
++ __u8 *hash_location; /* temporary pointer, overloaded */
++ __u32 tsval, tsecr; /* need to include OPTION_TS */
++ struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
++#ifdef CONFIG_MPTCP
++ u16 mptcp_options; /* bit field of MPTCP related OPTION_* */
++ u8 dss_csum:1,
++ add_addr_v4:1,
++ add_addr_v6:1; /* dss-checksum required? */
++
++ __u32 data_seq; /* data sequence number, for MPTCP */
++ __u32 data_ack; /* data ack, for MPTCP */
++
++ union {
++ struct {
++ __u64 sender_key; /* sender's key for mptcp */
++ __u64 receiver_key; /* receiver's key for mptcp */
++ } mp_capable;
++
++ struct {
++ __u64 sender_truncated_mac;
++ __u32 sender_nonce;
++ /* random number of the sender */
++ __u32 token; /* token for mptcp */
++ } mp_join_syns;
++ };
++
++ struct {
++ struct in_addr addr;
++ u8 addr_id;
++ } add_addr4;
++
++ struct {
++ struct in6_addr addr;
++ u8 addr_id;
++ } add_addr6;
++
++ u16 remove_addrs; /* list of address id */
++ u8 addr_id; /* address id (mp_join or add_address) */
++#endif /* CONFIG_MPTCP */
++};
++
+ /*These are used to set the sack_ok field in struct tcp_options_received */
+ #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */
+ #define TCP_FACK_ENABLED (1 << 1) /*1 = FACK is enabled locally*/
+@@ -95,6 +142,9 @@ struct tcp_options_received {
+ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
+ };
+
++struct mptcp_cb;
++struct mptcp_tcp_sock;
++
+ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
+ {
+ rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
+@@ -123,6 +173,7 @@ struct tcp_request_sock {
+ * FastOpen it's the seq#
+ * after data-in-SYN.
+ */
++ u8 saw_mpc:1;
+ };
+
+ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+@@ -130,6 +181,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+ return (struct tcp_request_sock *)req;
+ }
+
++struct tcp_md5sig_key;
++
+ struct tcp_sock {
+ /* inet_connection_sock has to be the first member of tcp_sock */
+ struct inet_connection_sock inet_conn;
+@@ -323,6 +376,45 @@ struct tcp_sock {
+ * socket. Used to retransmit SYNACKs etc.
+ */
+ struct request_sock *fastopen_rsk;
++
++
++ struct mptcp_cb *mpcb;
++ struct sock *meta_sk;
++ /* We keep these flags even if CONFIG_MPTCP is not checked, because
++ * it allows checking MPTCP capability just by checking the mpc flag,
++ * rather than adding ifdefs everywhere.
++ */
++ u16 mpc:1, /* Other end is multipath capable */
++ inside_tk_table:1, /* Is the tcp_sock inside the token-table? */
++ send_mp_fclose:1,
++ request_mptcp:1, /* Did we send out an MP_CAPABLE?
++ * (this speeds up mptcp_doit() in tcp_recvmsg)
++ */
++ mptcp_enabled:1, /* Is MPTCP enabled from the application ? */
++ pf:1, /* Potentially Failed state: when this flag is set, we
++ * stop using the subflow
++ */
++ mp_killed:1, /* Killed with a tcp_done in mptcp? */
++ was_meta_sk:1, /* This was a meta sk (in case of reuse) */
++ close_it:1, /* Must close socket in mptcp_data_ready? */
++ closing:1;
++ struct mptcp_tcp_sock *mptcp;
++#ifdef CONFIG_MPTCP
++ struct hlist_nulls_node tk_table;
++ u32 mptcp_loc_token;
++ u64 mptcp_loc_key;
++#endif /* CONFIG_MPTCP */
++
++ /* Functions that depend on the value of the mpc flag */
++ u32 (*__select_window)(struct sock *sk);
++ u16 (*select_window)(struct sock *sk);
++ void (*select_initial_window)(int __space, __u32 mss, __u32 *rcv_wnd,
++ __u32 *window_clamp, int wscale_ok,
++ __u8 *rcv_wscale, __u32 init_rcv_wnd,
++ const struct sock *sk);
++ void (*init_buffer_space)(struct sock *sk);
++ void (*set_rto)(struct sock *sk);
++ bool (*should_expand_sndbuf)(const struct sock *sk);
+ };
+
+ enum tsq_flags {
+@@ -334,6 +426,8 @@ enum tsq_flags {
+ TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
+ * tcp_v{4|6}_mtu_reduced()
+ */
++ MPTCP_PATH_MANAGER, /* MPTCP deferred creation of new subflows */
++ MPTCP_SUB_DEFERRED, /* A subflow got deferred - process them */
+ };
+
+ static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+@@ -352,6 +446,7 @@ struct tcp_timewait_sock {
+ #ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *tw_md5_key;
+ #endif
++ struct mptcp_tw *mptcp_tw;
+ };
+
+ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
+diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
+index f981ba7..0144c65 100644
+--- a/include/net/inet6_connection_sock.h
++++ b/include/net/inet6_connection_sock.h
+@@ -27,6 +27,8 @@ int inet6_csk_bind_conflict(const struct sock *sk,
+
+ struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+ const struct request_sock *req);
++u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
++ const u32 rnd, const u32 synq_hsize);
+
+ struct request_sock *inet6_csk_search_req(const struct sock *sk,
+ struct request_sock ***prevp,
+diff --git a/include/net/inet_common.h b/include/net/inet_common.h
+index fe7994c..780f229 100644
+--- a/include/net/inet_common.h
++++ b/include/net/inet_common.h
+@@ -1,6 +1,8 @@
+ #ifndef _INET_COMMON_H
+ #define _INET_COMMON_H
+
++#include <net/sock.h>
++
+ extern const struct proto_ops inet_stream_ops;
+ extern const struct proto_ops inet_dgram_ops;
+
+@@ -13,6 +15,8 @@ struct sock;
+ struct sockaddr;
+ struct socket;
+
++int inet_create(struct net *net, struct socket *sock, int protocol, int kern);
++int inet6_create(struct net *net, struct socket *sock, int protocol, int kern);
+ int inet_release(struct socket *sock);
+ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index c55aeed..84d1927 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -243,6 +243,9 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
+
+ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+
++u32 inet_synq_hash(const __be32 raddr, const __be16 rport, const u32 rnd,
++ const u32 synq_hsize);
++
+ struct request_sock *inet_csk_search_req(const struct sock *sk,
+ struct request_sock ***prevp,
+ const __be16 rport,
+diff --git a/include/net/mptcp.h b/include/net/mptcp.h
+new file mode 100644
+index 0000000..6454535
+--- /dev/null
++++ b/include/net/mptcp.h
+@@ -0,0 +1,1471 @@
++/*
++ * MPTCP implementation
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MPTCP_H
++#define _MPTCP_H
++
++#include <linux/inetdevice.h>
++#include <linux/ipv6.h>
++#include <linux/list.h>
++#include <linux/net.h>
++#include <linux/netpoll.h>
++#include <linux/skbuff.h>
++#include <linux/socket.h>
++#include <linux/tcp.h>
++#include <linux/kernel.h>
++
++#include <asm/byteorder.h>
++#include <asm/unaligned.h>
++#include <crypto/hash.h>
++#include <net/tcp.h>
++
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ #define ntohll(x) be64_to_cpu(x)
++ #define htonll(x) cpu_to_be64(x)
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ #define ntohll(x) (x)
++ #define htonll(x) (x)
++#endif
++
++/* Max number of local or remote addresses we can store.
++ * When changing, see the bitfield below in mptcp_loc4/6. */
++#define MPTCP_MAX_ADDR 8
++
++#define MPTCP_SUBFLOW_RETRY_DELAY 1000
++
++struct mptcp_loc4 {
++ u8 loc4_id;
++ u8 low_prio:1;
++ struct in_addr addr;
++};
++
++struct mptcp_rem4 {
++ u8 rem4_id;
++ u8 bitfield;
++ u8 retry_bitfield;
++ __be16 port;
++ struct in_addr addr;
++};
++
++struct mptcp_loc6 {
++ u8 loc6_id;
++ u8 low_prio:1;
++ struct in6_addr addr;
++};
++
++struct mptcp_rem6 {
++ u8 rem6_id;
++ u8 bitfield;
++ u8 retry_bitfield;
++ __be16 port;
++ struct in6_addr addr;
++};
++
++struct mptcp_request_sock {
++ struct tcp_request_sock req;
++ struct mptcp_cb *mpcb;
++ /* Collision list in the tuple hashtable. We need to find
++ * the req sock when receiving the third msg of the 3-way handshake,
++ * since that one does not contain the token. If this makes
++ * the request sock too long, we can use kmalloc'ed specific entries for
++ * that tuple hashtable. At the moment, though, I extend the
++ * request_sock.
++ */
++ struct list_head collide_tuple;
++ struct hlist_nulls_node collide_tk;
++ u32 mptcp_rem_nonce;
++ u32 mptcp_loc_token;
++ u64 mptcp_loc_key;
++ u64 mptcp_rem_key;
++ u64 mptcp_hash_tmac;
++ u32 mptcp_loc_nonce;
++ u8 loc_id;
++ u8 rem_id; /* Address-id in the MP_JOIN */
++ u8 dss_csum:1,
++ low_prio:1;
++};
++
++struct mptcp_options_received {
++ u16 saw_mpc:1,
++ dss_csum:1,
++ drop_me:1,
++
++ is_mp_join:1,
++ join_ack:1,
++
++ saw_low_prio:2, /* 0x1 - low-prio set for this subflow
++ * 0x2 - low-prio set for another subflow
++ */
++ low_prio:1,
++
++ saw_add_addr:2, /* Saw at least one add_addr option:
++ * 0x1: IPv4 - 0x2: IPv6
++ */
++ more_add_addr:1, /* Saw one more add-addr. */
++
++ saw_rem_addr:1, /* Saw at least one rem_addr option */
++ more_rem_addr:1, /* Saw one more rem-addr. */
++
++ mp_fail:1,
++ mp_fclose:1;
++ u8 rem_id; /* Address-id in the MP_JOIN */
++ u8 prio_addr_id; /* Address-id in the MP_PRIO */
++
++ const unsigned char *add_addr_ptr; /* Pointer to add-address option */
++ const unsigned char *rem_addr_ptr; /* Pointer to rem-address option */
++
++ u32 data_ack;
++ u32 data_seq;
++ u16 data_len;
++
++ u32 mptcp_rem_token;/* Remote token */
++
++ /* Key inside the option (from mp_capable or fast_close) */
++ u64 mptcp_key;
++
++ u32 mptcp_recv_nonce;
++ u64 mptcp_recv_tmac;
++ u8 mptcp_recv_mac[20];
++};
++
++struct mptcp_tcp_sock {
++ struct tcp_sock *next; /* Next subflow socket */
++ struct list_head cb_list;
++ struct mptcp_options_received rx_opt;
++
++ /* Those three fields record the current mapping */
++ u64 map_data_seq;
++ u32 map_subseq;
++ u16 map_data_len;
++ u16 slave_sk:1,
++ fully_established:1,
++ establish_increased:1,
++ second_packet:1,
++ attached:1,
++ send_mp_fail:1,
++ include_mpc:1,
++ mapping_present:1,
++ map_data_fin:1,
++ low_prio:1, /* use this socket as backup */
++ rcv_low_prio:1, /* Peer sent low-prio option to us */
++ send_mp_prio:1, /* Trigger to send mp_prio on this socket */
++ pre_established:1; /* State between sending 3rd ACK and
++ * receiving the fourth ack of new subflows.
++ */
++
++ /* isn: needed to translate abs to relative subflow seqnums */
++ u32 snt_isn;
++ u32 rcv_isn;
++ u32 last_data_seq;
++ u8 path_index;
++ u8 loc_id;
++ u8 rem_id;
++
++ u32 last_rbuf_opti; /* Timestamp of last rbuf optimization */
++ unsigned int sent_pkts;
++
++ struct sk_buff *shortcut_ofoqueue; /* Shortcut to the current modified
++ * skb in the ofo-queue.
++ */
++
++ int init_rcv_wnd;
++ u32 infinite_cutoff_seq;
++ struct delayed_work work;
++ u32 mptcp_loc_nonce;
++ struct tcp_sock *tp; /* Where is my daddy? */
++ u32 last_end_data_seq;
++
++ /* MP_JOIN subflow: timer for retransmitting the 3rd ack */
++ struct timer_list mptcp_ack_timer;
++
++ /* HMAC of the third ack */
++ char sender_mac[20];
++};
++
++struct mptcp_tw {
++ struct list_head list;
++ u64 loc_key;
++ u64 rcv_nxt;
++ struct mptcp_cb __rcu *mpcb;
++ u8 meta_tw:1,
++ in_list:1;
++};
++
++#define MPTCP_PM_NAME_MAX 16
++struct mptcp_pm_ops {
++ struct list_head list;
++
++ /* Signal the creation of a new MPTCP-session. */
++ void (*new_session)(struct sock *meta_sk, int index);
++ void (*release_sock)(struct sock *meta_sk);
++ void (*fully_established)(struct sock *meta_sk);
++ void (*new_remote_address)(struct sock *meta_sk);
++ int (*get_local_index)(sa_family_t family, union inet_addr *addr,
++ struct net *net);
++ int (*get_local_id)(sa_family_t family, union inet_addr *addr,
++ struct net *net);
++ void (*addr_signal)(struct sock *sk, unsigned *size,
++ struct tcp_out_options *opts, struct sk_buff *skb);
++
++ char name[MPTCP_PM_NAME_MAX];
++ struct module *owner;
++};
++
++struct mptcp_cb {
++ struct sock *meta_sk;
++
++ /* list of sockets in this multipath connection */
++ struct tcp_sock *connection_list;
++ /* list of sockets that need a call to release_cb */
++ struct list_head callback_list;
++
++ spinlock_t tw_lock;
++ struct list_head tw_list;
++ unsigned char mptw_state;
++
++ atomic_t mpcb_refcnt;
++
++ /* High-order bits of 64-bit sequence numbers */
++ u32 snd_high_order[2];
++ u32 rcv_high_order[2];
++
++ u16 send_infinite_mapping:1,
++ in_time_wait:1,
++ list_rcvd:1, /* XXX TO REMOVE */
++ dss_csum:1,
++ server_side:1,
++ infinite_mapping_rcv:1,
++ infinite_mapping_snd:1,
++ dfin_combined:1, /* Was the DFIN combined with subflow-fin? */
++ passive_close:1,
++ snd_hiseq_index:1, /* Index in snd_high_order of snd_nxt */
++ rcv_hiseq_index:1; /* Index in rcv_high_order of rcv_nxt */
++
++ /* socket count in this connection */
++ u8 cnt_subflows;
++ u8 cnt_established;
++
++ u32 noneligible; /* Path mask of temporarily non
++ * eligible subflows by the scheduler
++ */
++
++ struct sk_buff_head reinject_queue;
++
++ u8 dfin_path_index;
++
++#define MPTCP_PM_SIZE 320
++ u8 mptcp_pm[MPTCP_PM_SIZE] __aligned(8);
++ struct mptcp_pm_ops *pm_ops;
++
++ /* Mutex needed, because otherwise mptcp_close will complain that the
++ * socket is owned by the user.
++ * E.g., mptcp_sub_close_wq is taking the meta-lock.
++ */
++ struct mutex mpcb_mutex;
++
++ /* Master socket, also part of the connection_list, this
++ * socket is the one that the application sees.
++ */
++ struct sock *master_sk;
++
++ u64 csum_cutoff_seq;
++
++ __u64 mptcp_loc_key;
++ __u32 mptcp_loc_token;
++ __u64 mptcp_rem_key;
++ __u32 mptcp_rem_token;
++
++ /* Create a new subflow - necessary because the meta-sk may be IPv4, but
++ * the new subflow can be IPv6
++ */
++ struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req,
++ struct dst_entry *dst);
++
++ /* Remote addresses */
++ struct mptcp_rem4 remaddr4[MPTCP_MAX_ADDR];
++ u8 rem4_bits;
++
++ struct mptcp_rem6 remaddr6[MPTCP_MAX_ADDR];
++ u8 rem6_bits;
++
++ u32 path_index_bits;
++ /* Next pi to pick up in case a new path becomes available */
++ u8 next_path_index;
++
++ /* Original snd/rcvbuf of the initial subflow.
++ * Used for the new subflows on the server-side to allow correct
++ * autotuning
++ */
++ int orig_sk_rcvbuf;
++ int orig_sk_sndbuf;
++ u32 orig_window_clamp;
++};
++
++#define MPTCP_SUB_CAPABLE 0
++#define MPTCP_SUB_LEN_CAPABLE_SYN 12
++#define MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN 12
++#define MPTCP_SUB_LEN_CAPABLE_ACK 20
++#define MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN 20
++
++#define MPTCP_SUB_JOIN 1
++#define MPTCP_SUB_LEN_JOIN_SYN 12
++#define MPTCP_SUB_LEN_JOIN_SYN_ALIGN 12
++#define MPTCP_SUB_LEN_JOIN_SYNACK 16
++#define MPTCP_SUB_LEN_JOIN_SYNACK_ALIGN 16
++#define MPTCP_SUB_LEN_JOIN_ACK 24
++#define MPTCP_SUB_LEN_JOIN_ACK_ALIGN 24
++
++#define MPTCP_SUB_DSS 2
++#define MPTCP_SUB_LEN_DSS 4
++#define MPTCP_SUB_LEN_DSS_ALIGN 4
++
++/* Lengths for seq and ack are the ones without the generic MPTCP-option header,
++ * as they are part of the DSS-option.
++ * To get the total length, just add the different options together.
++ */
++#define MPTCP_SUB_LEN_SEQ 10
++#define MPTCP_SUB_LEN_SEQ_CSUM 12
++#define MPTCP_SUB_LEN_SEQ_ALIGN 12
++
++#define MPTCP_SUB_LEN_SEQ_64 14
++#define MPTCP_SUB_LEN_SEQ_CSUM_64 16
++#define MPTCP_SUB_LEN_SEQ_64_ALIGN 16
++
++#define MPTCP_SUB_LEN_ACK 4
++#define MPTCP_SUB_LEN_ACK_ALIGN 4
++
++#define MPTCP_SUB_LEN_ACK_64 8
++#define MPTCP_SUB_LEN_ACK_64_ALIGN 8
++
++/* This is the "default" option-length we will send out most often.
++ * MPTCP DSS-header
++ * 32-bit data sequence number
++ * 32-bit data ack
++ *
++ * It is necessary to calculate the effective MSS we will be using when
++ * sending data.
++ */
++#define MPTCP_SUB_LEN_DSM_ALIGN (MPTCP_SUB_LEN_DSS_ALIGN + \
++ MPTCP_SUB_LEN_SEQ_ALIGN + \
++ MPTCP_SUB_LEN_ACK_ALIGN)
++
++#define MPTCP_SUB_ADD_ADDR 3
++#define MPTCP_SUB_LEN_ADD_ADDR4 8
++#define MPTCP_SUB_LEN_ADD_ADDR6 20
++#define MPTCP_SUB_LEN_ADD_ADDR4_ALIGN 8
++#define MPTCP_SUB_LEN_ADD_ADDR6_ALIGN 20
++
++#define MPTCP_SUB_REMOVE_ADDR 4
++#define MPTCP_SUB_LEN_REMOVE_ADDR 4
++
++#define MPTCP_SUB_PRIO 5
++#define MPTCP_SUB_LEN_PRIO 3
++#define MPTCP_SUB_LEN_PRIO_ADDR 4
++#define MPTCP_SUB_LEN_PRIO_ALIGN 4
++
++#define MPTCP_SUB_FAIL 6
++#define MPTCP_SUB_LEN_FAIL 12
++#define MPTCP_SUB_LEN_FAIL_ALIGN 12
++
++#define MPTCP_SUB_FCLOSE 7
++#define MPTCP_SUB_LEN_FCLOSE 12
++#define MPTCP_SUB_LEN_FCLOSE_ALIGN 12
++
++
++#define OPTION_MPTCP (1 << 5)
++
++static inline void reset_mpc(struct tcp_sock *tp)
++{
++ tp->mpc = 0;
++
++ tp->__select_window = __tcp_select_window;
++ tp->select_window = tcp_select_window;
++ tp->select_initial_window = tcp_select_initial_window;
++ tp->init_buffer_space = tcp_init_buffer_space;
++ tp->set_rto = tcp_set_rto;
++ tp->should_expand_sndbuf = tcp_should_expand_sndbuf;
++}
++
++/* Initializes MPTCP flags in tcp_sock (and other tcp_sock members that depend
++ * on those flags).
++ */
++static inline void mptcp_init_tcp_sock(struct tcp_sock *tp)
++{
++ reset_mpc(tp);
++}
++
++#ifdef CONFIG_MPTCP
++
++/* Used for checking if the mptcp initialization has been successful */
++extern bool mptcp_init_failed;
++
++/* MPTCP options */
++#define OPTION_TYPE_SYN (1 << 0)
++#define OPTION_TYPE_SYNACK (1 << 1)
++#define OPTION_TYPE_ACK (1 << 2)
++#define OPTION_MP_CAPABLE (1 << 3)
++#define OPTION_DATA_ACK (1 << 4)
++#define OPTION_ADD_ADDR (1 << 5)
++#define OPTION_MP_JOIN (1 << 6)
++#define OPTION_MP_FAIL (1 << 7)
++#define OPTION_MP_FCLOSE (1 << 8)
++#define OPTION_REMOVE_ADDR (1 << 9)
++#define OPTION_MP_PRIO (1 << 10)
++
++/* MPTCP flags */
++#define MPTCPHDR_ACK 0x01
++#define MPTCPHDR_SEQ 0x02
++#define MPTCPHDR_FIN 0x04
++#define MPTCPHDR_INF 0x08
++#define MPTCPHDR_SEQ64_SET 0x10 /* Did we received a 64-bit seq number */
++#define MPTCPHDR_SEQ64_OFO 0x20 /* Is it not in our circular array? */
++#define MPTCPHDR_SEQ64_INDEX 0x40 /* Index of seq in mpcb->snd_high_order */
++#define MPTCPHDR_DSS_CSUM 0x80
++
++/* It is impossible, that all 8 bits of mptcp_flags are set to 1 with the above
++ * Thus, defining MPTCPHDR_JOIN as 0xFF is safe.
++ */
++#define MPTCPHDR_JOIN 0xFF
++
++struct mptcp_option {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u8 ver:4,
++ sub:4;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u8 sub:4,
++ ver:4;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++};
++
++struct mp_capable {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u8 ver:4,
++ sub:4;
++ __u8 h:1,
++ rsv:5,
++ b:1,
++ a:1;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u8 sub:4,
++ ver:4;
++ __u8 a:1,
++ b:1,
++ rsv:5,
++ h:1;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ __u64 sender_key;
++ __u64 receiver_key;
++} __attribute__((__packed__));
++
++struct mp_join {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u8 b:1,
++ rsv:3,
++ sub:4;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u8 sub:4,
++ rsv:3,
++ b:1;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ __u8 addr_id;
++ union {
++ struct {
++ u32 token;
++ u32 nonce;
++ } syn;
++ struct {
++ __u64 mac;
++ u32 nonce;
++ } synack;
++ struct {
++ __u8 mac[20];
++ } ack;
++ } u;
++} __attribute__((__packed__));
++
++struct mp_dss {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u16 rsv1:4,
++ sub:4,
++ A:1,
++ a:1,
++ M:1,
++ m:1,
++ F:1,
++ rsv2:3;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u16 sub:4,
++ rsv1:4,
++ rsv2:3,
++ F:1,
++ m:1,
++ M:1,
++ a:1,
++ A:1;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++};
++
++struct mp_add_addr {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u8 ipver:4,
++ sub:4;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u8 sub:4,
++ ipver:4;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ __u8 addr_id;
++ union {
++ struct {
++ struct in_addr addr;
++ __be16 port;
++ } v4;
++ struct {
++ struct in6_addr addr;
++ __be16 port;
++ } v6;
++ } u;
++} __attribute__((__packed__));
++
++struct mp_remove_addr {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u8 rsv:4,
++ sub:4;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u8 sub:4,
++ rsv:4;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ /* list of addr_id */
++ __u8 addrs_id;
++};
++
++struct mp_fail {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u16 rsv1:4,
++ sub:4,
++ rsv2:8;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u16 sub:4,
++ rsv1:4,
++ rsv2:8;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ __be64 data_seq;
++} __attribute__((__packed__));
++
++struct mp_fclose {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u16 rsv1:4,
++ sub:4,
++ rsv2:8;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u16 sub:4,
++ rsv1:4,
++ rsv2:8;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ __u64 key;
++} __attribute__((__packed__));
++
++struct mp_prio {
++ __u8 kind;
++ __u8 len;
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++ __u8 b:1,
++ rsv:3,
++ sub:4;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++ __u8 sub:4,
++ rsv:3,
++ b:1;
++#else
++#error "Adjust your <asm/byteorder.h> defines"
++#endif
++ __u8 addr_id;
++} __attribute__((__packed__));
++
++static inline int mptcp_sub_len_dss(struct mp_dss *m, int csum)
++{
++ return 4 + m->A * (4 + m->a * 4) + m->M * (10 + m->m * 4 + csum * 2);
++}
++
++#define MPTCP_APP 2
++
++extern int sysctl_mptcp_enabled;
++extern int sysctl_mptcp_checksum;
++extern int sysctl_mptcp_debug;
++extern int sysctl_mptcp_syn_retries;
++
++extern struct workqueue_struct *mptcp_wq;
++
++#define mptcp_debug(fmt, args...) \
++ do { \
++ if (unlikely(sysctl_mptcp_debug)) \
++ pr_err(__FILE__ ": " fmt, ##args); \
++ } while (0)
++
++/* Iterates over all subflows */
++#define mptcp_for_each_tp(mpcb, tp) \
++ for ((tp) = (mpcb)->connection_list; (tp); (tp) = (tp)->mptcp->next)
++
++#define mptcp_for_each_sk(mpcb, sk) \
++ for ((sk) = (struct sock *)(mpcb)->connection_list; \
++ sk; \
++ sk = (struct sock *)tcp_sk(sk)->mptcp->next)
++
++#define mptcp_for_each_sk_safe(__mpcb, __sk, __temp) \
++ for (__sk = (struct sock *)(__mpcb)->connection_list, \
++ __temp = __sk ? (struct sock *)tcp_sk(__sk)->mptcp->next : NULL; \
++ __sk; \
++ __sk = __temp, \
++ __temp = __sk ? (struct sock *)tcp_sk(__sk)->mptcp->next : NULL)
++
++/* Iterates over all bit set to 1 in a bitset */
++#define mptcp_for_each_bit_set(b, i) \
++ for (i = ffs(b) - 1; i >= 0; i = ffs(b >> (i + 1) << (i + 1)) - 1)
++
++#define mptcp_for_each_bit_unset(b, i) \
++ mptcp_for_each_bit_set(~b, i)
++
++extern struct lock_class_key meta_key;
++extern struct lock_class_key meta_slock_key;
++extern u32 mptcp_secret[MD5_MESSAGE_BYTES / 4];
++
++/* This is needed to ensure that two subsequent key-generations result in
++ * different keys if the IPs and ports are the same.
++ */
++extern u32 mptcp_key_seed;
++
++#define MPTCP_HASH_SIZE 1024
++
++extern struct hlist_nulls_head tk_hashtable[MPTCP_HASH_SIZE];
++
++/* This second hashtable is needed to retrieve request socks
++ * created as a result of a join request. While the SYN contains
++ * the token, the final ack does not, so we need a separate hashtable
++ * to retrieve the mpcb.
++ */
++extern struct list_head mptcp_reqsk_htb[MPTCP_HASH_SIZE];
++extern spinlock_t mptcp_reqsk_hlock; /* hashtable protection */
++
++/* Lock, protecting the two hash-tables that hold the token. Namely,
++ * mptcp_reqsk_tk_htb and tk_hashtable
++ */
++extern spinlock_t mptcp_tk_hashlock; /* hashtable protection */
++
++void mptcp_data_ready(struct sock *sk, int bytes);
++void mptcp_write_space(struct sock *sk);
++
++void mptcp_add_meta_ofo_queue(struct sock *meta_sk, struct sk_buff *skb,
++ struct sock *sk);
++void mptcp_ofo_queue(struct sock *meta_sk);
++void mptcp_purge_ofo_queue(struct tcp_sock *meta_tp);
++void mptcp_cleanup_rbuf(struct sock *meta_sk, int copied);
++int mptcp_alloc_mpcb(struct sock *master_sk, __u64 remote_key, u32 window);
++int mptcp_add_sock(struct sock *meta_sk, struct sock *sk, u8 loc_id, u8 rem_id,
++ gfp_t flags);
++void mptcp_del_sock(struct sock *sk);
++void mptcp_update_metasocket(struct sock *sock, struct sock *meta_sk);
++void mptcp_reinject_data(struct sock *orig_sk, int clone_it);
++void mptcp_update_sndbuf(struct mptcp_cb *mpcb);
++struct sk_buff *mptcp_next_segment(struct sock *sk, int *reinject);
++void mptcp_send_fin(struct sock *meta_sk);
++void mptcp_send_active_reset(struct sock *meta_sk, gfp_t priority);
++int mptcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
++ int push_one, gfp_t gfp);
++void mptcp_parse_options(const uint8_t *ptr, int opsize,
++ struct tcp_options_received *opt_rx,
++ struct mptcp_options_received *mopt,
++ const struct sk_buff *skb);
++void mptcp_syn_options(struct sock *sk, struct tcp_out_options *opts,
++ unsigned *remaining);
++void mptcp_synack_options(struct request_sock *req,
++ struct tcp_out_options *opts,
++ unsigned *remaining);
++void mptcp_established_options(struct sock *sk, struct sk_buff *skb,
++ struct tcp_out_options *opts, unsigned *size);
++void mptcp_options_write(__be32 *ptr, struct tcp_sock *tp,
++ struct tcp_out_options *opts,
++ struct sk_buff *skb);
++void mptcp_close(struct sock *meta_sk, long timeout);
++int mptcp_doit(struct sock *sk);
++int mptcp_create_master_sk(struct sock *meta_sk, __u64 remote_key, u32 window);
++int mptcp_check_req_master(struct sock *sk, struct sock *child,
++ struct request_sock *req,
++ struct request_sock **prev,
++ struct mptcp_options_received *mopt);
++struct sock *mptcp_check_req_child(struct sock *sk, struct sock *child,
++ struct request_sock *req,
++ struct request_sock **prev,
++ struct mptcp_options_received *mopt);
++u32 __mptcp_select_window(struct sock *sk);
++void mptcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
++ __u32 *window_clamp, int wscale_ok,
++ __u8 *rcv_wscale, __u32 init_rcv_wnd,
++ const struct sock *sk);
++unsigned int mptcp_current_mss(struct sock *meta_sk);
++int mptcp_select_size(const struct sock *meta_sk, bool sg);
++void mptcp_key_sha1(u64 key, u32 *token, u64 *idsn);
++void mptcp_hmac_sha1(u8 *key_1, u8 *key_2, u8 *rand_1, u8 *rand_2,
++ u32 *hash_out);
++void mptcp_clean_rtx_infinite(struct sk_buff *skb, struct sock *sk);
++void mptcp_fin(struct sock *meta_sk);
++void mptcp_retransmit_timer(struct sock *meta_sk);
++int mptcp_write_wakeup(struct sock *meta_sk);
++void mptcp_sub_close_wq(struct work_struct *work);
++void mptcp_sub_close(struct sock *sk, unsigned long delay);
++struct sock *mptcp_select_ack_sock(const struct sock *meta_sk, int copied);
++void mptcp_fallback_meta_sk(struct sock *meta_sk);
++int mptcp_backlog_rcv(struct sock *meta_sk, struct sk_buff *skb);
++struct sock *mptcp_sk_clone(const struct sock *sk, int family, const gfp_t priority);
++void mptcp_ack_handler(unsigned long);
++int mptcp_check_rtt(const struct tcp_sock *tp, int time);
++int mptcp_check_snd_buf(const struct tcp_sock *tp);
++int mptcp_handle_options(struct sock *sk, const struct tcphdr *th, struct sk_buff *skb);
++void __init mptcp_init(void);
++int mptcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len);
++int mptcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
++ unsigned int mss_now, int reinject);
++int mptso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
++ unsigned int mss_now, gfp_t gfp, int reinject);
++void mptcp_destroy_sock(struct sock *sk);
++int mptcp_rcv_synsent_state_process(struct sock *sk, struct sock **skptr,
++ struct sk_buff *skb,
++ struct mptcp_options_received *mopt);
++unsigned int mptcp_xmit_size_goal(struct sock *meta_sk, u32 mss_now,
++ int large_allowed);
++int mptcp_time_wait(struct sock *sk, struct tcp_timewait_sock *tw);
++void mptcp_twsk_destructor(struct tcp_timewait_sock *tw);
++void mptcp_update_tw_socks(const struct tcp_sock *tp, int state);
++void mptcp_disconnect(struct sock *sk);
++bool mptcp_should_expand_sndbuf(const struct sock *sk);
++int mptcp_retransmit_skb(struct sock *meta_sk, struct sk_buff *skb);
++void mptcp_tsq_flags(struct sock *sk);
++void mptcp_tsq_sub_deferred(struct sock *meta_sk);
++struct mp_join *mptcp_find_join(struct sk_buff *skb);
++void mptcp_hash_remove_bh(struct tcp_sock *meta_tp);
++void mptcp_hash_remove(struct tcp_sock *meta_tp);
++struct sock *mptcp_hash_find(struct net *net, u32 token);
++int mptcp_lookup_join(struct sk_buff *skb, struct inet_timewait_sock *tw);
++int mptcp_do_join_short(struct sk_buff *skb, struct mptcp_options_received *mopt,
++ struct tcp_options_received *tmp_opt, struct net *net);
++void mptcp_reqsk_destructor(struct request_sock *req);
++void mptcp_reqsk_new_mptcp(struct request_sock *req,
++ const struct tcp_options_received *rx_opt,
++ const struct mptcp_options_received *mopt,
++ const struct sk_buff *skb);
++int mptcp_check_req(struct sk_buff *skb, struct net *net);
++void mptcp_connect_init(struct sock *sk);
++void mptcp_sub_force_close(struct sock *sk);
++int mptcp_sub_len_remove_addr_align(u16 bitfield);
++void mptcp_remove_shortcuts(const struct mptcp_cb *mpcb,
++ const struct sk_buff *skb);
++void mptcp_init_buffer_space(struct sock *sk);
++
++/* MPTCP-path-manager registration/initialization functions */
++int mptcp_register_path_manager(struct mptcp_pm_ops *pm);
++void mptcp_unregister_path_manager(struct mptcp_pm_ops *pm);
++void mptcp_init_path_manager(struct mptcp_cb *mpcb);
++void mptcp_cleanup_path_manager(struct mptcp_cb *mpcb);
++void mptcp_fallback_default(struct mptcp_cb *mpcb);
++void mptcp_get_default_path_manager(char *name);
++int mptcp_set_default_path_manager(const char *name);
++extern struct mptcp_pm_ops mptcp_pm_default;
++
++static inline
++struct mptcp_request_sock *mptcp_rsk(const struct request_sock *req)
++{
++ return (struct mptcp_request_sock *)req;
++}
++
++static inline
++struct request_sock *rev_mptcp_rsk(const struct mptcp_request_sock *req)
++{
++ return (struct request_sock *)req;
++}
++
++static inline bool mptcp_can_sendpage(struct sock *sk)
++{
++ struct sock *sk_it;
++
++ if (tcp_sk(sk)->mpcb->dss_csum)
++ return false;
++
++ mptcp_for_each_sk(tcp_sk(sk)->mpcb, sk_it) {
++ if (!(sk_it->sk_route_caps & NETIF_F_SG) ||
++ !(sk_it->sk_route_caps & NETIF_F_ALL_CSUM))
++ return false;
++ }
++
++ return true;
++}
++
++static inline void mptcp_push_pending_frames(struct sock *meta_sk)
++{
++ if (mptcp_next_segment(meta_sk, NULL)) {
++ struct tcp_sock *tp = tcp_sk(meta_sk);
++
++ /* We don't care about the MSS, because it will be set in
++ * mptcp_write_xmit.
++ */
++ __tcp_push_pending_frames(meta_sk, 0, tp->nonagle);
++ }
++}
++
++static inline void mptcp_send_reset(struct sock *sk)
++{
++ tcp_send_active_reset(sk, GFP_ATOMIC);
++ mptcp_sub_force_close(sk);
++}
++
++static inline int mptcp_is_data_seq(const struct sk_buff *skb)
++{
++ return TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_SEQ;
++}
++
++static inline int mptcp_is_data_fin(const struct sk_buff *skb)
++{
++ return mptcp_is_data_seq(skb) &&
++ (TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_FIN);
++}
++
++/* Is it a data-fin while in infinite mapping mode?
++ * In infinite mode, a subflow-fin is in fact a data-fin.
++ */
++static inline int mptcp_is_data_fin2(const struct sk_buff *skb,
++ const struct tcp_sock *tp)
++{
++ return mptcp_is_data_fin(skb) ||
++ (tp->mpcb->infinite_mapping_rcv && tcp_hdr(skb)->fin);
++}
++
++static inline void mptcp_skb_entail_init(const struct tcp_sock *tp,
++ struct sk_buff *skb)
++{
++ TCP_SKB_CB(skb)->mptcp_flags = MPTCPHDR_SEQ;
++}
++
++static inline u8 mptcp_get_64_bit(u64 data_seq, struct mptcp_cb *mpcb)
++{
++ u64 data_seq_high = (u32)(data_seq >> 32);
++
++ if (mpcb->rcv_high_order[0] == data_seq_high)
++ return 0;
++ else if (mpcb->rcv_high_order[1] == data_seq_high)
++ return MPTCPHDR_SEQ64_INDEX;
++ else
++ return MPTCPHDR_SEQ64_OFO;
++}
++
++/* Sets the data_seq and returns pointer to the in-skb field of the data_seq.
++ * If the packet has a 64-bit dseq, the pointer points to the last 32 bits.
++ */
++static inline __u32 *mptcp_skb_set_data_seq(const struct sk_buff *skb,
++ u32 *data_seq,
++ struct mptcp_cb *mpcb)
++{
++ __u32 *ptr = (__u32 *)(skb_transport_header(skb) + TCP_SKB_CB(skb)->dss_off);
++
++ if (TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_SEQ64_SET) {
++ u64 data_seq64 = get_unaligned_be64(ptr);
++
++ if (mpcb)
++ TCP_SKB_CB(skb)->mptcp_flags |= mptcp_get_64_bit(data_seq64, mpcb);
++
++		*data_seq = (u32)data_seq64;
++ ptr++;
++ } else {
++ *data_seq = get_unaligned_be32(ptr);
++ }
++
++ return ptr;
++}
++
++static inline struct sock *mptcp_meta_sk(const struct sock *sk)
++{
++ return tcp_sk(sk)->meta_sk;
++}
++
++static inline struct tcp_sock *mptcp_meta_tp(const struct tcp_sock *tp)
++{
++ return tcp_sk(tp->meta_sk);
++}
++
++static inline int is_meta_tp(const struct tcp_sock *tp)
++{
++ return tp->mpcb && mptcp_meta_tp(tp) == tp;
++}
++
++static inline int is_meta_sk(const struct sock *sk)
++{
++ return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP &&
++ tcp_sk(sk)->mpc && mptcp_meta_sk(sk) == sk;
++}
++
++static inline int is_master_tp(const struct tcp_sock *tp)
++{
++ return !tp->mpc || (!tp->mptcp->slave_sk && !is_meta_tp(tp));
++}
++
++static inline void mptcp_hash_request_remove(struct request_sock *req)
++{
++ int in_softirq = 0;
++
++ if (list_empty(&mptcp_rsk(req)->collide_tuple))
++ return;
++
++ if (in_softirq()) {
++ spin_lock(&mptcp_reqsk_hlock);
++ in_softirq = 1;
++ } else {
++ spin_lock_bh(&mptcp_reqsk_hlock);
++ }
++
++ list_del(&mptcp_rsk(req)->collide_tuple);
++
++ if (in_softirq)
++ spin_unlock(&mptcp_reqsk_hlock);
++ else
++ spin_unlock_bh(&mptcp_reqsk_hlock);
++}
++
++static inline void mptcp_init_mp_opt(struct mptcp_options_received *mopt)
++{
++ mopt->saw_mpc = 0;
++ mopt->dss_csum = 0;
++ mopt->drop_me = 0;
++
++ mopt->is_mp_join = 0;
++ mopt->join_ack = 0;
++
++ mopt->saw_low_prio = 0;
++ mopt->low_prio = 0;
++
++ mopt->saw_add_addr = 0;
++ mopt->more_add_addr = 0;
++
++ mopt->saw_rem_addr = 0;
++ mopt->more_rem_addr = 0;
++
++ mopt->mp_fail = 0;
++ mopt->mp_fclose = 0;
++}
++
++static inline void mptcp_reset_mopt(struct tcp_sock *tp)
++{
++ struct mptcp_options_received *mopt = &tp->mptcp->rx_opt;
++
++ mopt->saw_low_prio = 0;
++ mopt->saw_add_addr = 0;
++ mopt->more_add_addr = 0;
++ mopt->saw_rem_addr = 0;
++ mopt->more_rem_addr = 0;
++ mopt->join_ack = 0;
++ mopt->mp_fail = 0;
++ mopt->mp_fclose = 0;
++}
++
++static inline __be32 mptcp_get_highorder_sndbits(const struct sk_buff *skb,
++ const struct mptcp_cb *mpcb)
++{
++ return htonl(mpcb->snd_high_order[(TCP_SKB_CB(skb)->mptcp_flags &
++ MPTCPHDR_SEQ64_INDEX) ? 1 : 0]);
++}
++
++static inline u64 mptcp_get_data_seq_64(const struct mptcp_cb *mpcb, int index,
++ u32 data_seq_32)
++{
++ return ((u64)mpcb->rcv_high_order[index] << 32) | data_seq_32;
++}
++
++static inline u64 mptcp_get_rcv_nxt_64(const struct tcp_sock *meta_tp)
++{
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ return mptcp_get_data_seq_64(mpcb, mpcb->rcv_hiseq_index,
++ meta_tp->rcv_nxt);
++}
++
++static inline void mptcp_check_sndseq_wrap(struct tcp_sock *meta_tp, int inc)
++{
++ if (unlikely(meta_tp->snd_nxt > meta_tp->snd_nxt + inc)) {
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ mpcb->snd_hiseq_index = mpcb->snd_hiseq_index ? 0 : 1;
++ mpcb->snd_high_order[mpcb->snd_hiseq_index] += 2;
++ }
++}
++
++static inline void mptcp_check_rcvseq_wrap(struct tcp_sock *meta_tp,
++ u32 old_rcv_nxt)
++{
++ if (unlikely(old_rcv_nxt > meta_tp->rcv_nxt)) {
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ mpcb->rcv_high_order[mpcb->rcv_hiseq_index] += 2;
++ mpcb->rcv_hiseq_index = mpcb->rcv_hiseq_index ? 0 : 1;
++ }
++}
++
++static inline int mptcp_sk_can_send(const struct sock *sk)
++{
++ return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
++ !tcp_sk(sk)->mptcp->pre_established;
++}
++
++static inline int mptcp_sk_can_recv(const struct sock *sk)
++{
++ return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCP_FIN_WAIT1 | TCP_FIN_WAIT2);
++}
++
++static inline int mptcp_sk_can_send_ack(const struct sock *sk)
++{
++ return !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV |
++ TCPF_CLOSE | TCPF_LISTEN)) &&
++ !tcp_sk(sk)->mptcp->pre_established;
++}
++
++/* Only support GSO if all subflows supports it */
++static inline bool mptcp_sk_can_gso(const struct sock *meta_sk)
++{
++ struct sock *sk;
++
++ if (tcp_sk(meta_sk)->mpcb->dss_csum)
++ return 0;
++
++ mptcp_for_each_sk(tcp_sk(meta_sk)->mpcb, sk) {
++ if (!mptcp_sk_can_send(sk))
++ continue;
++ if (!sk_can_gso(sk))
++ return false;
++ }
++ return true;
++}
++
++static inline bool mptcp_can_sg(const struct sock *meta_sk)
++{
++ struct sock *sk;
++
++ if (tcp_sk(meta_sk)->mpcb->dss_csum)
++ return 0;
++
++ mptcp_for_each_sk(tcp_sk(meta_sk)->mpcb, sk) {
++ if (!mptcp_sk_can_send(sk))
++ continue;
++ if (!(sk->sk_route_caps & NETIF_F_SG))
++ return false;
++ }
++ return true;
++}
++
++static inline void mptcp_set_rto(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *sk_it;
++ struct inet_connection_sock *micsk = inet_csk(mptcp_meta_sk(sk));
++ __u32 max_rto = 0;
++
++ /* We are in recovery-phase on the MPTCP-level. Do not update the
++ * RTO, because this would kill exponential backoff.
++ */
++ if (micsk->icsk_retransmits)
++ return;
++
++ mptcp_for_each_sk(tp->mpcb, sk_it) {
++ if (mptcp_sk_can_send(sk_it) &&
++ inet_csk(sk_it)->icsk_rto > max_rto)
++ max_rto = inet_csk(sk_it)->icsk_rto;
++ }
++ if (max_rto) {
++ micsk->icsk_rto = max_rto << 1;
++
++		/* A successful rto-measurement - reset backoff counter */
++ micsk->icsk_backoff = 0;
++ }
++}
++
++static inline int mptcp_sysctl_syn_retries(void)
++{
++ return sysctl_mptcp_syn_retries;
++}
++
++static inline void mptcp_sub_close_passive(struct sock *sk)
++{
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = tcp_sk(meta_sk);
++
++ /* Only close, if the app did a send-shutdown (passive close), and we
++ * received the data-ack of the data-fin.
++ */
++ if (tp->mpcb->passive_close && meta_tp->snd_una == meta_tp->write_seq)
++ mptcp_sub_close(sk, 0);
++}
++
++static inline bool mptcp_fallback_infinite(struct sock *sk, int flag)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++	/* If data has been acknowledged on the meta-level, fully_established
++ * will have been set before and thus we will not fall back to infinite
++ * mapping.
++ */
++ if (likely(tp->mptcp->fully_established))
++ return false;
++
++ if (!(flag & MPTCP_FLAG_DATA_ACKED))
++ return false;
++
++ /* Don't fallback twice ;) */
++ if (tp->mpcb->infinite_mapping_snd)
++ return false;
++
++ pr_err("%s %#x will fallback - pi %d, src %pI4 dst %pI4 from %pS\n",
++ __func__, tp->mpcb->mptcp_loc_token, tp->mptcp->path_index,
++ &inet_sk(sk)->inet_saddr, &inet_sk(sk)->inet_daddr,
++ __builtin_return_address(0));
++ if (!is_master_tp(tp))
++ return true;
++
++ tp->mpcb->infinite_mapping_snd = 1;
++ tp->mpcb->infinite_mapping_rcv = 1;
++ tp->mptcp->fully_established = 1;
++
++ return false;
++}
++
++/* Find the first free index in the bitfield */
++static inline int __mptcp_find_free_index(u8 bitfield, int j, u8 base)
++{
++ int i;
++ mptcp_for_each_bit_unset(bitfield >> base, i) {
++ /* We wrapped at the bitfield - try from 0 on */
++ if (i + base >= sizeof(bitfield) * 8) {
++ mptcp_for_each_bit_unset(bitfield, i) {
++ if (i >= sizeof(bitfield) * 8)
++ goto exit;
++
++ if (i != j)
++ return i;
++ }
++ goto exit;
++ }
++ if (i + base >= sizeof(bitfield) * 8)
++ break;
++
++ if (i + base != j)
++ return i + base;
++ }
++exit:
++ return -1;
++}
++
++static inline int mptcp_find_free_index(u8 bitfield)
++{
++ return __mptcp_find_free_index(bitfield, -1, 0);
++}
++
++/* Find the first index whose bit in the bit-field == 0 */
++static inline u8 mptcp_set_new_pathindex(struct mptcp_cb *mpcb)
++{
++ u8 base = mpcb->next_path_index;
++ int i;
++
++ /* Start at 1, because 0 is reserved for the meta-sk */
++ mptcp_for_each_bit_unset(mpcb->path_index_bits >> base, i) {
++ if (i + base < 1)
++ continue;
++ if (i + base >= sizeof(mpcb->path_index_bits) * 8)
++ break;
++ i += base;
++ mpcb->path_index_bits |= (1 << i);
++ mpcb->next_path_index = i + 1;
++ return i;
++ }
++ mptcp_for_each_bit_unset(mpcb->path_index_bits, i) {
++ if (i >= sizeof(mpcb->path_index_bits) * 8)
++ break;
++ if (i < 1)
++ continue;
++ mpcb->path_index_bits |= (1 << i);
++ mpcb->next_path_index = i + 1;
++ return i;
++ }
++
++ return 0;
++}
++
++static inline int mptcp_v6_is_v4_mapped(struct sock *sk)
++{
++ return sk->sk_family == AF_INET6 &&
++ ipv6_addr_type(&inet6_sk(sk)->saddr) == IPV6_ADDR_MAPPED;
++}
++
++/* TCP and MPTCP mpc flag-depending functions */
++u16 mptcp_select_window(struct sock *sk);
++void mptcp_init_buffer_space(struct sock *sk);
++void mptcp_tcp_set_rto(struct sock *sk);
++
++static inline void set_mpc(struct tcp_sock *tp)
++{
++ tp->mpc = 1;
++
++ tp->__select_window = __mptcp_select_window;
++ tp->select_window = mptcp_select_window;
++ tp->select_initial_window = mptcp_select_initial_window;
++ tp->init_buffer_space = mptcp_init_buffer_space;
++ tp->set_rto = mptcp_tcp_set_rto;
++ tp->should_expand_sndbuf = mptcp_should_expand_sndbuf;
++}
++
++#else /* CONFIG_MPTCP */
++#define mptcp_debug(fmt, args...) \
++ do { \
++ } while (0)
++
++/* Without MPTCP, we just do one iteration
++ * over the only socket available. This assumes that
++ * the sk/tp arg is the socket in that case.
++ */
++#define mptcp_for_each_sk(mpcb, sk)
++#define mptcp_for_each_sk_safe(__mpcb, __sk, __temp)
++
++static inline int mptcp_is_data_fin(const struct sk_buff *skb)
++{
++ return 0;
++}
++static inline int mptcp_is_data_seq(const struct sk_buff *skb)
++{
++ return 0;
++}
++static inline struct sock *mptcp_meta_sk(const struct sock *sk)
++{
++ return NULL;
++}
++static inline struct tcp_sock *mptcp_meta_tp(const struct tcp_sock *tp)
++{
++ return NULL;
++}
++static inline int is_meta_sk(const struct sock *sk)
++{
++ return 0;
++}
++static inline int is_master_tp(const struct tcp_sock *tp)
++{
++ return 0;
++}
++static inline void mptcp_purge_ofo_queue(struct tcp_sock *meta_tp) {}
++static inline void mptcp_cleanup_rbuf(const struct sock *meta_sk, int copied) {}
++static inline void mptcp_del_sock(const struct sock *sk) {}
++static inline void mptcp_reinject_data(struct sock *orig_sk, int clone_it) {}
++static inline void mptcp_update_sndbuf(const struct mptcp_cb *mpcb) {}
++static inline void mptcp_skb_entail_init(const struct tcp_sock *tp,
++ const struct sk_buff *skb) {}
++static inline void mptcp_clean_rtx_infinite(const struct sk_buff *skb,
++ const struct sock *sk) {}
++static inline void mptcp_retransmit_timer(const struct sock *meta_sk) {}
++static inline int mptcp_write_wakeup(struct sock *meta_sk)
++{
++ return 0;
++}
++static inline void mptcp_sub_close(struct sock *sk, unsigned long delay) {}
++static inline void mptcp_set_rto(const struct sock *sk) {}
++static inline void mptcp_send_fin(const struct sock *meta_sk) {}
++static inline void mptcp_parse_options(const uint8_t *ptr, const int opsize,
++ const struct tcp_options_received *opt_rx,
++ const struct mptcp_options_received *mopt,
++ const struct sk_buff *skb) {}
++static inline void mptcp_syn_options(struct sock *sk,
++ struct tcp_out_options *opts,
++ unsigned *remaining) {}
++static inline void mptcp_synack_options(struct request_sock *req,
++ struct tcp_out_options *opts,
++ unsigned *remaining) {}
++
++static inline void mptcp_established_options(struct sock *sk,
++ struct sk_buff *skb,
++ struct tcp_out_options *opts,
++ unsigned *size) {}
++static inline void mptcp_options_write(__be32 *ptr, struct tcp_sock *tp,
++ struct tcp_out_options *opts,
++ struct sk_buff *skb) {}
++static inline void mptcp_close(struct sock *meta_sk, long timeout) {}
++static inline int mptcp_doit(struct sock *sk)
++{
++ return 0;
++}
++static inline int mptcp_check_req_master(const struct sock *sk,
++ const struct sock *child,
++ struct request_sock *req,
++ struct request_sock **prev,
++ const struct mptcp_options_received *mopt)
++{
++ return 1;
++}
++static inline struct sock *mptcp_check_req_child(struct sock *sk,
++ struct sock *child,
++ struct request_sock *req,
++ struct request_sock **prev,
++ struct mptcp_options_received *mopt)
++{
++ return NULL;
++}
++static inline unsigned int mptcp_current_mss(struct sock *meta_sk)
++{
++ return 0;
++}
++static inline int mptcp_select_size(const struct sock *meta_sk, bool sg)
++{
++ return 0;
++}
++static inline void mptcp_sub_close_passive(struct sock *sk) {}
++static inline bool mptcp_fallback_infinite(const struct sock *sk, int flag)
++{
++ return false;
++}
++static inline void mptcp_init_mp_opt(const struct mptcp_options_received *mopt) {}
++static inline int mptcp_check_rtt(const struct tcp_sock *tp, int time)
++{
++ return 0;
++}
++static inline int mptcp_check_snd_buf(const struct tcp_sock *tp)
++{
++ return 0;
++}
++static inline int mptcp_sysctl_syn_retries(void)
++{
++ return 0;
++}
++static inline void mptcp_send_reset(const struct sock *sk) {}
++static inline void mptcp_send_active_reset(struct sock *meta_sk,
++ gfp_t priority) {}
++static inline int mptcp_write_xmit(struct sock *sk, unsigned int mss_now,
++ int nonagle, int push_one, gfp_t gfp)
++{
++ return 0;
++}
++static inline struct sock *mptcp_sk_clone(const struct sock *sk, int family,
++ const gfp_t priority)
++{
++ return NULL;
++}
++static inline int mptcp_handle_options(struct sock *sk,
++ const struct tcphdr *th,
++ struct sk_buff *skb)
++{
++ return 0;
++}
++static inline void mptcp_reset_mopt(struct tcp_sock *tp) {}
++static inline void __init mptcp_init(void) {}
++static inline int mptcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
++{
++ return 0;
++}
++static inline int mptcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
++ unsigned int mss_now, int reinject)
++{
++ return 0;
++}
++static inline int mptso_fragment(struct sock *sk, struct sk_buff *skb,
++ unsigned int len, unsigned int mss_now,
++ gfp_t gfp, int reinject)
++{
++ return 0;
++}
++static inline bool mptcp_sk_can_gso(const struct sock *sk)
++{
++ return false;
++}
++static inline bool mptcp_can_sg(const struct sock *meta_sk)
++{
++ return false;
++}
++static inline unsigned int mptcp_xmit_size_goal(struct sock *meta_sk,
++ u32 mss_now, int large_allowed)
++{
++ return 0;
++}
++static inline void mptcp_destroy_sock(struct sock *sk) {}
++static inline int mptcp_rcv_synsent_state_process(struct sock *sk,
++ struct sock **skptr,
++ struct sk_buff *skb,
++ struct mptcp_options_received *mopt)
++{
++ return 0;
++}
++static inline bool mptcp_can_sendpage(struct sock *sk)
++{
++ return false;
++}
++static inline int mptcp_time_wait(struct sock *sk, struct tcp_timewait_sock *tw)
++{
++ return 0;
++}
++static inline void mptcp_twsk_destructor(struct tcp_timewait_sock *tw) {}
++static inline void mptcp_update_tw_socks(const struct tcp_sock *tp, int state) {}
++static inline void mptcp_disconnect(struct sock *sk) {}
++static inline void mptcp_tsq_flags(struct sock *sk) {}
++static inline void mptcp_tsq_sub_deferred(struct sock *meta_sk) {}
++static inline void mptcp_hash_remove_bh(struct tcp_sock *meta_tp) {}
++static inline void mptcp_hash_remove(struct tcp_sock *meta_tp) {}
++static inline void mptcp_reqsk_new_mptcp(struct request_sock *req,
++ const struct tcp_options_received *rx_opt,
++ const struct mptcp_options_received *mopt,
++ const struct sk_buff *skb) {}
++static inline void mptcp_remove_shortcuts(const struct mptcp_cb *mpcb,
++ const struct sk_buff *skb) {}
++#endif /* CONFIG_MPTCP */
++
++#endif /* _MPTCP_H */
+diff --git a/include/net/mptcp_v4.h b/include/net/mptcp_v4.h
+new file mode 100644
+index 0000000..047884c
+--- /dev/null
++++ b/include/net/mptcp_v4.h
+@@ -0,0 +1,69 @@
++/*
++ * MPTCP implementation
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef MPTCP_V4_H_
++#define MPTCP_V4_H_
++
++
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <net/mptcp.h>
++#include <net/request_sock.h>
++#include <net/sock.h>
++
++extern struct request_sock_ops mptcp_request_sock_ops;
++
++#ifdef CONFIG_MPTCP
++
++int mptcp_v4_do_rcv(struct sock *meta_sk, struct sk_buff *skb);
++int mptcp_v4_rem_raddress(struct mptcp_cb *mpcb, u8 id);
++int mptcp_v4_add_raddress(struct mptcp_cb *mpcb, const struct in_addr *addr,
++ __be16 port, u8 id);
++void mptcp_v4_set_init_addr_bit(struct mptcp_cb *mpcb, __be32 daddr, int index);
++struct sock *mptcp_v4_search_req(const __be16 rport, const __be32 raddr,
++ const __be32 laddr, const struct net *net);
++int mptcp_init4_subsockets(struct sock *meta_sk, const struct mptcp_loc4 *loc,
++ struct mptcp_rem4 *rem);
++int mptcp_pm_v4_init(void);
++void mptcp_pm_v4_undo(void);
++u32 mptcp_v4_get_nonce(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
++ u32 seq);
++u64 mptcp_v4_get_key(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport);
++
++#else
++
++static inline int mptcp_v4_do_rcv(const struct sock *meta_sk,
++ const struct sk_buff *skb)
++{
++ return 0;
++}
++
++#endif /* CONFIG_MPTCP */
++
++#endif /* MPTCP_V4_H_ */
+diff --git a/include/net/mptcp_v6.h b/include/net/mptcp_v6.h
+new file mode 100644
+index 0000000..c303208
+--- /dev/null
++++ b/include/net/mptcp_v6.h
+@@ -0,0 +1,72 @@
++/*
++ * MPTCP implementation
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef _MPTCP_V6_H
++#define _MPTCP_V6_H
++
++#include <linux/in6.h>
++#include <net/if_inet6.h>
++
++#include <net/mptcp.h>
++
++extern struct request_sock_ops mptcp6_request_sock_ops;
++extern struct proto mptcpv6_prot;
++
++#ifdef CONFIG_MPTCP
++
++int mptcp_v6_do_rcv(struct sock *meta_sk, struct sk_buff *skb);
++int mptcp_v6_rem_raddress(struct mptcp_cb *mpcb, u8 id);
++int mptcp_v6_add_raddress(struct mptcp_cb *mpcb, const struct in6_addr *addr,
++ __be16 port, u8 id);
++void mptcp_v6_set_init_addr_bit(struct mptcp_cb *mpcb,
++ const struct in6_addr *daddr, int index);
++struct sock *mptcp_v6_search_req(const __be16 rport, const struct in6_addr *raddr,
++ const struct in6_addr *laddr, const struct net *net);
++int mptcp_init6_subsockets(struct sock *meta_sk, const struct mptcp_loc6 *loc,
++ struct mptcp_rem6 *rem);
++int mptcp_pm_v6_init(void);
++void mptcp_pm_v6_undo(void);
++struct sock *mptcp_v6v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req,
++ struct dst_entry *dst);
++__u32 mptcp_v6_get_nonce(const __be32 *saddr, const __be32 *daddr,
++ __be16 sport, __be16 dport, u32 seq);
++u64 mptcp_v6_get_key(const __be32 *saddr, const __be32 *daddr,
++ __be16 sport, __be16 dport);
++
++#else /* CONFIG_MPTCP */
++
++static inline int mptcp_v6_do_rcv(struct sock *meta_sk, struct sk_buff *skb)
++{
++ return 0;
++}
++
++#endif /* CONFIG_MPTCP */
++
++#endif /* _MPTCP_V6_H */
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 991dcd9..6297c97 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -15,6 +15,7 @@
+ #include <net/netns/packet.h>
+ #include <net/netns/ipv4.h>
+ #include <net/netns/ipv6.h>
++#include <net/netns/mptcp.h>
+ #include <net/netns/sctp.h>
+ #include <net/netns/dccp.h>
+ #include <net/netns/netfilter.h>
+@@ -90,6 +91,9 @@ struct net {
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct netns_ipv6 ipv6;
+ #endif
++#if IS_ENABLED(CONFIG_MPTCP)
++ struct netns_mptcp mptcp;
++#endif
+ #if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
+ struct netns_sctp sctp;
+ #endif
+diff --git a/include/net/netns/mptcp.h b/include/net/netns/mptcp.h
+new file mode 100644
+index 0000000..bad418b
+--- /dev/null
++++ b/include/net/netns/mptcp.h
+@@ -0,0 +1,44 @@
++/*
++ * MPTCP implementation - MPTCP namespace
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#ifndef __NETNS_MPTCP_H__
++#define __NETNS_MPTCP_H__
++
++#include <linux/compiler.h>
++
++enum {
++ MPTCP_PM_FULLMESH = 0,
++ MPTCP_PM_MAX
++};
++
++struct netns_mptcp {
++ void *path_managers[MPTCP_PM_MAX];
++};
++
++#endif /* __NETNS_MPTCP_H__ */
+diff --git a/include/net/request_sock.h b/include/net/request_sock.h
+index 7f830ff..e79e87a 100644
+--- a/include/net/request_sock.h
++++ b/include/net/request_sock.h
+@@ -164,7 +164,7 @@ struct request_sock_queue {
+ };
+
+ int reqsk_queue_alloc(struct request_sock_queue *queue,
+- unsigned int nr_table_entries);
++ unsigned int nr_table_entries, gfp_t flags);
+
+ void __reqsk_queue_destroy(struct request_sock_queue *queue);
+ void reqsk_queue_destroy(struct request_sock_queue *queue);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index b9586a1..09a682e 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -899,6 +899,16 @@ void sk_clear_memalloc(struct sock *sk);
+
+ int sk_wait_data(struct sock *sk, long *timeo);
+
++/* START - needed for MPTCP */
++extern void sock_def_error_report(struct sock *sk);
++extern struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
++ int family);
++extern void sock_lock_init(struct sock *sk);
++
++extern struct lock_class_key af_callback_keys[AF_MAX];
++extern char *const af_family_clock_key_strings[AF_MAX+1];
++/* END - needed for MPTCP */
++
+ struct request_sock_ops;
+ struct timewait_sock_ops;
+ struct inet_hashinfo;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 743acce..db0cc04 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -176,6 +176,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
+ #define TCPOPT_SACK 5 /* SACK Block */
+ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
+ #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
++#define TCPOPT_MPTCP 30
+ #define TCPOPT_EXP 254 /* Experimental */
+ /* Magic number to be after the option value for sharing TCP
+ * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
+@@ -234,6 +235,27 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
+ */
+ #define TFO_SERVER_ALWAYS 0x1000
+
++/* Flags from tcp_input.c for tcp_ack */
++#define FLAG_DATA 0x01 /* Incoming frame contained data. */
++#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
++#define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
++#define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */
++#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
++#define FLAG_DATA_SACKED 0x20 /* New SACK. */
++#define FLAG_ECE 0x40 /* ECE in this ACK */
++#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
++#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
++#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
++#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
++#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
++#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
++#define MPTCP_FLAG_DATA_ACKED 0x8000
++
++#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
++#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
++#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
++#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
++
+ extern struct inet_timewait_death_row tcp_death_row;
+
+ /* sysctl variables for tcp */
+@@ -349,6 +371,112 @@ extern struct proto tcp_prot;
+ #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
+ #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
+
++/**** START - Exports needed for MPTCP ****/
++extern const struct inet_connection_sock_af_ops ipv4_specific;
++extern const struct inet_connection_sock_af_ops ipv6_specific;
++extern const struct inet_connection_sock_af_ops ipv6_mapped;
++extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
++extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
++
++struct mptcp_options_received;
++
++int tcp_close_state(struct sock *sk);
++void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle, int
++ size_goal);
++void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
++ const struct sk_buff *skb);
++int tcp_xmit_probe_skb(struct sock *sk, int urgent);
++void tcp_cwnd_validate(struct sock *sk);
++void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb);
++int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
++ gfp_t gfp_mask);
++unsigned int tcp_mss_split_point(const struct sock *sk,
++ const struct sk_buff *skb,
++ unsigned int mss_now,
++ unsigned int max_segs,
++ int nonagle);
++bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb);
++bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
++ unsigned int cur_mss, int nonagle);
++bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
++ unsigned int cur_mss);
++unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb);
++int tcp_mtu_probe(struct sock *sk);
++int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
++ unsigned int mss_now);
++void __pskb_trim_head(struct sk_buff *skb, int len);
++void tcp_queue_skb(struct sock *sk, struct sk_buff *skb);
++void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags);
++void tcp_reset(struct sock *sk);
++bool tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
++ const u32 ack_seq, const u32 nwin);
++bool tcp_urg_mode(const struct tcp_sock *tp);
++void tcp_ack_probe(struct sock *sk);
++void tcp_rearm_rto(struct sock *sk);
++int tcp_write_timeout(struct sock *sk);
++bool retransmits_timed_out(struct sock *sk, unsigned int boundary,
++ unsigned int timeout, bool syn_set);
++void tcp_write_err(struct sock *sk);
++void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr);
++void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
++ unsigned int mss_now);
++
++int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req);
++void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req);
++__u32 tcp_v4_init_sequence(const struct sk_buff *skb);
++int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
++ struct request_sock *req,
++ u16 queue_mapping);
++void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb);
++struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb);
++struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb);
++void tcp_v4_reqsk_destructor(struct request_sock *req);
++
++int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req);
++void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req);
++__u32 tcp_v6_init_sequence(const struct sk_buff *skb);
++int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
++ struct flowi6 *fl6, struct request_sock *req,
++ u16 queue_mapping);
++void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
++int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
++int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
++void tcp_v6_destroy_sock(struct sock *sk);
++void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
++void tcp_v6_hash(struct sock *sk);
++struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb);
++struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req,
++ struct dst_entry *dst);
++void tcp_v6_reqsk_destructor(struct request_sock *req);
++
++void sock_valbool_flag(struct sock *sk, int bit, int valbool);
++unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
++ int large_allowed);
++u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb);
++
++void skb_clone_fraglist(struct sk_buff *skb);
++void copy_skb_header(struct sk_buff *new, const struct sk_buff *old);
++
++void inet_twsk_free(struct inet_timewait_sock *tw);
++/* These states need RST on ABORT according to RFC793 */
++static inline bool tcp_need_reset(int state)
++{
++ return (1 << state) &
++ (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
++ TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
++}
++
++bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
++ int hlen);
++int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
++ bool *fragstolen);
++bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to,
++ struct sk_buff *from, bool *fragstolen);
++/**** END - Exports needed for MPTCP ****/
++
+ void tcp_tasklet_init(void);
+
+ void tcp_v4_err(struct sk_buff *skb, u32);
+@@ -445,6 +573,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ size_t len, int nonblock, int flags, int *addr_len);
+ void tcp_parse_options(const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx,
++ struct mptcp_options_received *mopt_rx,
+ int estab, struct tcp_fastopen_cookie *foc);
+ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+
+@@ -557,11 +686,15 @@ void tcp_send_delayed_ack(struct sock *sk);
+ void tcp_send_loss_probe(struct sock *sk);
+ bool tcp_schedule_loss_probe(struct sock *sk);
+
++u16 tcp_select_window(struct sock *sk);
++
+ /* tcp_input.c */
+ void tcp_cwnd_application_limited(struct sock *sk);
+ void tcp_resume_early_retransmit(struct sock *sk);
+ void tcp_rearm_rto(struct sock *sk);
+ void tcp_reset(struct sock *sk);
++void tcp_set_rto(struct sock *sk);
++bool tcp_should_expand_sndbuf(const struct sock *sk);
+
+ /* tcp_timer.c */
+ void tcp_init_xmit_timers(struct sock *);
+@@ -705,14 +838,24 @@ void tcp_send_window_probe(struct sock *sk);
+ */
+ struct tcp_skb_cb {
+ union {
+- struct inet_skb_parm h4;
++ union {
++ struct inet_skb_parm h4;
+ #if IS_ENABLED(CONFIG_IPV6)
+- struct inet6_skb_parm h6;
++ struct inet6_skb_parm h6;
+ #endif
+- } header; /* For incoming frames */
++ } header; /* For incoming frames */
++#ifdef CONFIG_MPTCP
++ __u32 path_mask; /* path indices that tried to send this skb */
++#endif
++ };
+ __u32 seq; /* Starting sequence number */
+ __u32 end_seq; /* SEQ + FIN + SYN + datalen */
+ __u32 when; /* used to compute rtt's */
++#ifdef CONFIG_MPTCP
++ __u8 mptcp_flags; /* flags for the MPTCP layer */
++ __u8 dss_off; /* Number of 4-byte words until
++ * seq-number */
++#endif
+ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
+
+ __u8 sacked; /* State flags for SACK/FACK. */
+@@ -1058,7 +1201,8 @@ u32 tcp_default_init_rwnd(u32 mss);
+ /* Determine a window scaling and initial window to offer. */
+ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+ __u32 *window_clamp, int wscale_ok,
+- __u8 *rcv_wscale, __u32 init_rcv_wnd);
++ __u8 *rcv_wscale, __u32 init_rcv_wnd,
++ const struct sock *sk);
+
+ static inline int tcp_win_from_space(int space)
+ {
+@@ -1070,12 +1214,18 @@ static inline int tcp_win_from_space(int space)
+ /* Note: caller must be prepared to deal with negative returns */
+ static inline int tcp_space(const struct sock *sk)
+ {
++ if (tcp_sk(sk)->mpc)
++ sk = tcp_sk(sk)->meta_sk;
++
+ return tcp_win_from_space(sk->sk_rcvbuf -
+ atomic_read(&sk->sk_rmem_alloc));
+ }
+
+ static inline int tcp_full_space(const struct sock *sk)
+ {
++ if (tcp_sk(sk)->mpc)
++ sk = tcp_sk(sk)->meta_sk;
++
+ return tcp_win_from_space(sk->sk_rcvbuf);
+ }
+
+@@ -1090,6 +1240,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
+ tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+ tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tcp_rsk(req)->snt_synack = 0;
++ tcp_rsk(req)->saw_mpc = 0;
+ req->mss = rx_opt->mss_clamp;
+ req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+ ireq->tstamp_ok = rx_opt->tstamp_ok;
+diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
+index d758163..3d81e49 100644
+--- a/include/uapi/linux/if.h
++++ b/include/uapi/linux/if.h
+@@ -53,6 +53,9 @@
+
+ #define IFF_ECHO 0x40000 /* echo sent packets */
+
++#define IFF_NOMULTIPATH 0x80000 /* Disable for MPTCP */
++#define IFF_MPBACKUP 0x100000 /* Use as backup path for MPTCP */
++
+ #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
+ IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
+
+diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
+index 377f1e5..2ffcb03 100644
+--- a/include/uapi/linux/tcp.h
++++ b/include/uapi/linux/tcp.h
+@@ -112,6 +112,7 @@ enum {
+ #define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
+ #define TCP_TIMESTAMP 24
+ #define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
++#define MPTCP_ENABLED 26
+
+ struct tcp_repair_opt {
+ __u32 opt_code;
+diff --git a/net/Kconfig b/net/Kconfig
+index e411046..3e4b278 100644
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -79,6 +79,7 @@ if INET
+ source "net/ipv4/Kconfig"
+ source "net/ipv6/Kconfig"
+ source "net/netlabel/Kconfig"
++source "net/mptcp/Kconfig"
+
+ endif # if INET
+
+diff --git a/net/Makefile b/net/Makefile
+index cbbbe6d..244bac1 100644
+--- a/net/Makefile
++++ b/net/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_INET) += ipv4/
+ obj-$(CONFIG_XFRM) += xfrm/
+ obj-$(CONFIG_UNIX) += unix/
+ obj-$(CONFIG_NET) += ipv6/
++obj-$(CONFIG_MPTCP) += mptcp/
+ obj-$(CONFIG_PACKET) += packet/
+ obj-$(CONFIG_NET_KEY) += key/
+ obj-$(CONFIG_BRIDGE) += bridge/
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 45fa2f1..3cfdbc0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5271,7 +5271,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
+
+ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
+ IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
+- IFF_AUTOMEDIA)) |
++ IFF_AUTOMEDIA | IFF_NOMULTIPATH | IFF_MPBACKUP)) |
+ (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
+ IFF_ALLMULTI));
+
+diff --git a/net/core/request_sock.c b/net/core/request_sock.c
+index 4425148..e128f08 100644
+--- a/net/core/request_sock.c
++++ b/net/core/request_sock.c
+@@ -38,7 +38,8 @@ int sysctl_max_syn_backlog = 256;
+ EXPORT_SYMBOL(sysctl_max_syn_backlog);
+
+ int reqsk_queue_alloc(struct request_sock_queue *queue,
+- unsigned int nr_table_entries)
++ unsigned int nr_table_entries,
++ gfp_t flags)
+ {
+ size_t lopt_size = sizeof(struct listen_sock);
+ struct listen_sock *lopt;
+@@ -48,9 +49,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
+ nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
+ lopt_size += nr_table_entries * sizeof(struct request_sock *);
+ if (lopt_size > PAGE_SIZE)
+- lopt = vzalloc(lopt_size);
++ lopt = __vmalloc(lopt_size,
++ flags | __GFP_HIGHMEM | __GFP_ZERO,
++ PAGE_KERNEL);
+ else
+- lopt = kzalloc(lopt_size, GFP_KERNEL);
++ lopt = kzalloc(lopt_size, flags);
+ if (lopt == NULL)
+ return -ENOMEM;
+
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 90b96a1..2564d89 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -472,7 +472,7 @@ static inline void skb_drop_fraglist(struct sk_buff *skb)
+ skb_drop_list(&skb_shinfo(skb)->frag_list);
+ }
+
+-static void skb_clone_fraglist(struct sk_buff *skb)
++void skb_clone_fraglist(struct sk_buff *skb)
+ {
+ struct sk_buff *list;
+
+@@ -894,7 +894,7 @@ static void skb_headers_offset_update(struct sk_buff *skb, int off)
+ skb->inner_mac_header += off;
+ }
+
+-static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
++void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
+ {
+ __copy_skb_header(new, old);
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index c0fc6bd..7314971 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -231,7 +231,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
+ "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
+ "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX"
+ };
+-static const char *const af_family_clock_key_strings[AF_MAX+1] = {
++char *const af_family_clock_key_strings[AF_MAX+1] = {
+ "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
+ "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
+ "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
+@@ -252,7 +252,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+-static struct lock_class_key af_callback_keys[AF_MAX];
++struct lock_class_key af_callback_keys[AF_MAX];
+
+ /* Take into consideration the size of the struct sk_buff overhead in the
+ * determination of these values, since that is non-constant across
+@@ -602,7 +602,7 @@ out:
+ return ret;
+ }
+
+-static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
++void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+ {
+ if (valbool)
+ sock_set_flag(sk, bit);
+@@ -1204,7 +1204,7 @@ lenout:
+ *
+ * (We also register the sk_lock with the lock validator.)
+ */
+-static inline void sock_lock_init(struct sock *sk)
++void sock_lock_init(struct sock *sk)
+ {
+ sock_lock_init_class_and_name(sk,
+ af_family_slock_key_strings[sk->sk_family],
+@@ -1252,7 +1252,7 @@ void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
+ }
+ EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
+
+-static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
++struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
+ int family)
+ {
+ struct sock *sk;
+@@ -2184,7 +2184,7 @@ static void sock_def_wakeup(struct sock *sk)
+ rcu_read_unlock();
+ }
+
+-static void sock_def_error_report(struct sock *sk)
++void sock_def_error_report(struct sock *sk)
+ {
+ struct socket_wq *wq;
+
+diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
+index 05c57f0..630434d 100644
+--- a/net/ipv4/Kconfig
++++ b/net/ipv4/Kconfig
+@@ -556,6 +556,30 @@ config TCP_CONG_ILLINOIS
+ For further details see:
+ http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
+
++config TCP_CONG_COUPLED
++ tristate "MPTCP COUPLED CONGESTION CONTROL"
++ depends on MPTCP
++ default n
++ ---help---
++ MultiPath TCP Coupled Congestion Control
++ To enable it, just put 'coupled' in tcp_congestion_control
++
++config TCP_CONG_OLIA
++ tristate "MPTCP Opportunistic Linked Increase"
++ depends on MPTCP
++ default n
++ ---help---
++ MultiPath TCP Opportunistic Linked Increase Congestion Control
++ To enable it, just put 'olia' in tcp_congestion_control
++
++config TCP_CONG_WVEGAS
++ tristate "MPTCP WVEGAS CONGESTION CONTROL"
++ depends on MPTCP
++ default n
++ ---help---
++ wVegas congestion control for MPTCP
++ To enable it, just put 'wvegas' in tcp_congestion_control
++
+ choice
+ prompt "Default TCP congestion control"
+ default DEFAULT_CUBIC
+@@ -584,6 +608,15 @@ choice
+ config DEFAULT_WESTWOOD
+ bool "Westwood" if TCP_CONG_WESTWOOD=y
+
++ config DEFAULT_COUPLED
++ bool "Coupled" if TCP_CONG_COUPLED=y
++
++ config DEFAULT_OLIA
++ bool "Olia" if TCP_CONG_OLIA=y
++
++ config DEFAULT_WVEGAS
++ bool "Wvegas" if TCP_CONG_WVEGAS=y
++
+ config DEFAULT_RENO
+ bool "Reno"
+
+@@ -605,6 +638,8 @@ config DEFAULT_TCP_CONG
+ default "vegas" if DEFAULT_VEGAS
+ default "westwood" if DEFAULT_WESTWOOD
+ default "veno" if DEFAULT_VENO
++ default "coupled" if DEFAULT_COUPLED
++ default "wvegas" if DEFAULT_WVEGAS
+ default "reno" if DEFAULT_RENO
+ default "cubic"
+
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 19ab78a..567918a 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -104,6 +104,7 @@
+ #include <net/ip_fib.h>
+ #include <net/inet_connection_sock.h>
+ #include <net/tcp.h>
++#include <net/mptcp.h>
+ #include <net/udp.h>
+ #include <net/udplite.h>
+ #include <net/ping.h>
+@@ -246,8 +247,7 @@ EXPORT_SYMBOL(inet_listen);
+ * Create an inet socket.
+ */
+
+-static int inet_create(struct net *net, struct socket *sock, int protocol,
+- int kern)
++int inet_create(struct net *net, struct socket *sock, int protocol, int kern)
+ {
+ struct sock *sk;
+ struct inet_protosw *answer;
+@@ -679,6 +679,23 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
+ lock_sock(sk2);
+
+ sock_rps_record_flow(sk2);
++
++ if (sk2->sk_protocol == IPPROTO_TCP && tcp_sk(sk2)->mpc) {
++ struct sock *sk_it = sk2;
++
++ mptcp_for_each_sk(tcp_sk(sk2)->mpcb, sk_it)
++ sock_rps_record_flow(sk_it);
++
++ if (tcp_sk(sk2)->mpcb->master_sk) {
++ sk_it = tcp_sk(sk2)->mpcb->master_sk;
++
++ write_lock_bh(&sk_it->sk_callback_lock);
++ sk_it->sk_wq = newsock->wq;
++ sk_it->sk_socket = newsock;
++ write_unlock_bh(&sk_it->sk_callback_lock);
++ }
++ }
++
+ WARN_ON(!((1 << sk2->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_SYN_RECV |
+ TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+@@ -1767,6 +1784,9 @@ static int __init inet_init(void)
+
+ ip_init();
+
++ /* We must initialize MPTCP before TCP. */
++ mptcp_init();
++
+ tcp_v4_init();
+
+ /* Setup TCP slab cache for open requests. */
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 0d1e2cb..423dfb6 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -23,6 +23,7 @@
+ #include <net/route.h>
+ #include <net/tcp_states.h>
+ #include <net/xfrm.h>
++#include <net/mptcp.h>
+
+ #ifdef INET_CSK_DEBUG
+ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
+@@ -468,8 +469,8 @@ no_route:
+ }
+ EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
+
+-static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
+- const u32 rnd, const u32 synq_hsize)
++u32 inet_synq_hash(const __be32 raddr, const __be16 rport, const u32 rnd,
++ const u32 synq_hsize)
+ {
+ return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
+ }
+@@ -667,7 +668,12 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
+ const struct request_sock *req,
+ const gfp_t priority)
+ {
+- struct sock *newsk = sk_clone_lock(sk, priority);
++ struct sock *newsk;
++
++ if (sk->sk_protocol == IPPROTO_TCP && tcp_sk(sk)->mpc)
++ newsk = mptcp_sk_clone(sk, req->rsk_ops->family, priority);
++ else
++ newsk = sk_clone_lock(sk, priority);
+
+ if (newsk != NULL) {
+ struct inet_connection_sock *newicsk = inet_csk(newsk);
+@@ -744,7 +750,8 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+- int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
++ int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries,
++ GFP_KERNEL);
+
+ if (rc != 0)
+ return rc;
+@@ -802,9 +809,14 @@ void inet_csk_listen_stop(struct sock *sk)
+
+ while ((req = acc_req) != NULL) {
+ struct sock *child = req->sk;
++ bool mutex_taken = false;
+
+ acc_req = req->dl_next;
+
++ if (is_meta_sk(child)) {
++ mutex_lock(&tcp_sk(child)->mpcb->mpcb_mutex);
++ mutex_taken = true;
++ }
+ local_bh_disable();
+ bh_lock_sock(child);
+ WARN_ON(sock_owned_by_user(child));
+@@ -833,6 +845,8 @@ void inet_csk_listen_stop(struct sock *sk)
+
+ bh_unlock_sock(child);
+ local_bh_enable();
++ if (mutex_taken)
++ mutex_unlock(&tcp_sk(child)->mpcb->mpcb_mutex);
+ sock_put(child);
+
+ sk_acceptq_removed(sk);
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index f2ed13c..f08addc 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -284,7 +284,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+- tcp_parse_options(skb, &tcp_opt, 0, NULL);
++ tcp_parse_options(skb, &tcp_opt, NULL, 0, NULL);
+
+ if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
+ goto out;
+@@ -354,10 +354,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+ /* Try to redo what tcp_v4_send_synack did. */
+ req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+
+- tcp_select_initial_window(tcp_full_space(sk), req->mss,
++ tp->select_initial_window(tcp_full_space(sk), req->mss,
+ &req->rcv_wnd, &req->window_clamp,
+ ireq->wscale_ok, &rcv_wscale,
+- dst_metric(&rt->dst, RTAX_INITRWND));
++ dst_metric(&rt->dst, RTAX_INITRWND), sk);
+
+ ireq->rcv_wscale = rcv_wscale;
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 97c8f56..be72a40 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -271,6 +271,7 @@
+
+ #include <net/icmp.h>
+ #include <net/inet_common.h>
++#include <net/mptcp.h>
+ #include <net/tcp.h>
+ #include <net/xfrm.h>
+ #include <net/ip.h>
+@@ -419,6 +420,9 @@ void tcp_init_sock(struct sock *sk)
+ sk->sk_sndbuf = sysctl_tcp_wmem[1];
+ sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+
++ /* Set function pointers in tcp_sock to tcp functions. */
++ mptcp_init_tcp_sock(tp);
++
+ local_bh_disable();
+ sock_update_memcg(sk);
+ sk_sockets_allocated_inc(sk);
+@@ -607,6 +611,8 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
+ tcb->seq = tcb->end_seq = tp->write_seq;
+ tcb->tcp_flags = TCPHDR_ACK;
+ tcb->sacked = 0;
++ if (tp->mpc)
++ mptcp_skb_entail_init(tp, skb);
+ skb_header_release(skb);
+ tcp_add_write_queue_tail(sk, skb);
+ sk->sk_wmem_queued += skb->truesize;
+@@ -640,8 +646,8 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
+ atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
+ }
+
+-static void tcp_push(struct sock *sk, int flags, int mss_now,
+- int nonagle, int size_goal)
++void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
++ int size_goal)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+@@ -726,6 +732,14 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
+ int ret;
+
+ sock_rps_record_flow(sk);
++
++#ifdef CONFIG_MPTCP
++ if (tcp_sk(sk)->mpc) {
++ struct sock *sk_it;
++ mptcp_for_each_sk(tcp_sk(sk)->mpcb, sk_it)
++ sock_rps_record_flow(sk_it);
++ }
++#endif
+ /*
+ * We can't seek on a socket input
+ */
+@@ -821,8 +835,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+ return NULL;
+ }
+
+-static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
+- int large_allowed)
++unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, int large_allowed)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 xmit_size_goal, old_size_goal;
+@@ -872,8 +885,13 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+ {
+ int mss_now;
+
+- mss_now = tcp_current_mss(sk);
+- *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
++ if (tcp_sk(sk)->mpc) {
++ mss_now = mptcp_current_mss(sk);
++ *size_goal = mptcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
++ } else {
++ mss_now = tcp_current_mss(sk);
++ *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
++ }
+
+ return mss_now;
+ }
+@@ -897,6 +915,26 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ goto out_err;
+ }
+
++ if (tp->mpc) {
++ struct sock *sk_it = sk;
++
++ /* We must check this with the socket lock held because we iterate
++ * over the subflows.
++ */
++ if (!mptcp_can_sendpage(sk)) {
++ ssize_t ret;
++
++ release_sock(sk);
++ ret = sock_no_sendpage(sk->sk_socket, page, offset,
++ size, flags);
++ lock_sock(sk);
++ return ret;
++ }
++
++ mptcp_for_each_sk(tp->mpcb, sk_it)
++ sock_rps_record_flow(sk_it);
++ }
++
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ mss_now = tcp_send_mss(sk, &size_goal, flags);
+@@ -1001,8 +1039,9 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
+ {
+ ssize_t res;
+
+- if (!(sk->sk_route_caps & NETIF_F_SG) ||
+- !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
++ /* If MPTCP is enabled, we check it later after establishment */
++ if (!tcp_sk(sk)->mpc && (!(sk->sk_route_caps & NETIF_F_SG) ||
++ !(sk->sk_route_caps & NETIF_F_ALL_CSUM)))
+ return sock_no_sendpage(sk->sk_socket, page, offset, size,
+ flags);
+
+@@ -1018,6 +1057,9 @@ static inline int select_size(const struct sock *sk, bool sg)
+ const struct tcp_sock *tp = tcp_sk(sk);
+ int tmp = tp->mss_cache;
+
++ if (tp->mpc)
++ return mptcp_select_size(sk, sg);
++
+ if (sg) {
+ if (sk_can_gso(sk)) {
+ /* Small frames wont use a full page:
+@@ -1105,6 +1147,12 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ goto do_error;
+ }
+
++ if (tp->mpc) {
++ struct sock *sk_it = sk;
++ mptcp_for_each_sk(tp->mpcb, sk_it)
++ sock_rps_record_flow(sk_it);
++ }
++
+ if (unlikely(tp->repair)) {
+ if (tp->repair_queue == TCP_RECV_QUEUE) {
+ copied = tcp_send_rcvq(sk, msg, size);
+@@ -1132,7 +1180,10 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto out_err;
+
+- sg = !!(sk->sk_route_caps & NETIF_F_SG);
++ if (tp->mpc)
++ sg = mptcp_can_sg(sk);
++ else
++ sg = !!(sk->sk_route_caps & NETIF_F_SG);
+
+ while (--iovlen >= 0) {
+ size_t seglen = iov->iov_len;
+@@ -1183,8 +1234,15 @@ new_segment:
+
+ /*
+ * Check whether we can use HW checksum.
++ *
++ * If dss-csum is enabled, we do not do hw-csum.
++ * In case of non-mptcp we check the
++ * device-capabilities.
++ * In case of mptcp, hw-csums will be handled
++ * later in mptcp_write_xmit.
+ */
+- if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
++ if (((tp->mpc && !tp->mpcb->dss_csum) || !tp->mpc) &&
++ (tp->mpc || sk->sk_route_caps & NETIF_F_ALL_CSUM))
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ skb_entail(sk, skb);
+@@ -1385,6 +1443,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
+
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+
++ if (is_meta_sk(sk)) {
++ mptcp_cleanup_rbuf(sk, copied);
++ return;
++ }
++
+ WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+ "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+ tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
+@@ -1421,7 +1484,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
+
+ /* Optimize, __tcp_select_window() is not cheap. */
+ if (2*rcv_window_now <= tp->window_clamp) {
+- __u32 new_window = __tcp_select_window(sk);
++ __u32 new_window = tp->__select_window(sk);
+
+ /* Send ACK now, if this read freed lots of space
+ * in our buffer. Certainly, new_window is new window.
+@@ -1622,6 +1685,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+
+ lock_sock(sk);
+
++#ifdef CONFIG_MPTCP
++ if (tp->mpc) {
++ struct sock *sk_it;
++ mptcp_for_each_sk(tp->mpcb, sk_it)
++ sock_rps_record_flow(sk_it);
++ }
++#endif
++
+ err = -ENOTCONN;
+ if (sk->sk_state == TCP_LISTEN)
+ goto out;
+@@ -2069,7 +2140,7 @@ static const unsigned char new_state[16] = {
+ /* TCP_CLOSING */ TCP_CLOSING,
+ };
+
+-static int tcp_close_state(struct sock *sk)
++int tcp_close_state(struct sock *sk)
+ {
+ int next = (int)new_state[sk->sk_state];
+ int ns = next & TCP_STATE_MASK;
+@@ -2098,8 +2169,12 @@ void tcp_shutdown(struct sock *sk, int how)
+ (TCPF_ESTABLISHED | TCPF_SYN_SENT |
+ TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
+ /* Clear out any half completed packets. FIN if needed. */
+- if (tcp_close_state(sk))
+- tcp_send_fin(sk);
++ if (tcp_close_state(sk)) {
++ if (!is_meta_sk(sk))
++ tcp_send_fin(sk);
++ else
++ mptcp_send_fin(sk);
++ }
+ }
+ }
+ EXPORT_SYMBOL(tcp_shutdown);
+@@ -2124,6 +2199,11 @@ void tcp_close(struct sock *sk, long timeout)
+ int data_was_unread = 0;
+ int state;
+
++ if (is_meta_sk(sk)) {
++ mptcp_close(sk, timeout);
++ return;
++ }
++
+ lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+@@ -2290,15 +2370,6 @@ out:
+ }
+ EXPORT_SYMBOL(tcp_close);
+
+-/* These states need RST on ABORT according to RFC793 */
+-
+-static inline bool tcp_need_reset(int state)
+-{
+- return (1 << state) &
+- (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
+- TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
+-}
+-
+ int tcp_disconnect(struct sock *sk, int flags)
+ {
+ struct inet_sock *inet = inet_sk(sk);
+@@ -2339,6 +2410,13 @@ int tcp_disconnect(struct sock *sk, int flags)
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ inet_reset_saddr(sk);
+
++ if (is_meta_sk(sk)) {
++ mptcp_disconnect(sk);
++ } else {
++ if (tp->inside_tk_table)
++ mptcp_hash_remove_bh(tp);
++ }
++
+ sk->sk_shutdown = 0;
+ sock_reset_flag(sk, SOCK_DONE);
+ tp->srtt = 0;
+@@ -2698,6 +2776,18 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ tp->notsent_lowat = val;
+ sk->sk_write_space(sk);
+ break;
++#ifdef CONFIG_MPTCP
++ case MPTCP_ENABLED:
++ if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_LISTEN) {
++ if (val)
++ tp->mptcp_enabled = 1;
++ else
++ tp->mptcp_enabled = 0;
++ } else {
++ err = -EPERM;
++ }
++ break;
++#endif
+ default:
+ err = -ENOPROTOOPT;
+ break;
+@@ -2917,6 +3007,11 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+ case TCP_NOTSENT_LOWAT:
+ val = tp->notsent_lowat;
+ break;
++#ifdef CONFIG_MPTCP
++ case MPTCP_ENABLED:
++ val = tp->mptcp_enabled;
++ break;
++#endif
+ default:
+ return -ENOPROTOOPT;
+ }
+@@ -3106,8 +3201,11 @@ void tcp_done(struct sock *sk)
+ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+
++ WARN_ON(sk->sk_state == TCP_CLOSE);
+ tcp_set_state(sk, TCP_CLOSE);
++
+ tcp_clear_xmit_timers(sk);
++
+ if (req != NULL)
+ reqsk_fastopen_remove(sk, req, false);
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index eeaac39..cb06531 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -74,6 +74,9 @@
+ #include <linux/ipsec.h>
+ #include <asm/unaligned.h>
+ #include <net/netdma.h>
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++#include <net/mptcp_v6.h>
+
+ int sysctl_tcp_timestamps __read_mostly = 1;
+ int sysctl_tcp_window_scaling __read_mostly = 1;
+@@ -99,25 +102,6 @@ int sysctl_tcp_thin_dupack __read_mostly;
+ int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
+ int sysctl_tcp_early_retrans __read_mostly = 3;
+
+-#define FLAG_DATA 0x01 /* Incoming frame contained data. */
+-#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
+-#define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
+-#define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */
+-#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
+-#define FLAG_DATA_SACKED 0x20 /* New SACK. */
+-#define FLAG_ECE 0x40 /* ECE in this ACK */
+-#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
+-#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
+-#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
+-#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
+-#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
+-#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
+-
+-#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
+-#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
+-#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
+-#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
+-
+ #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
+ #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
+
+@@ -283,8 +267,12 @@ static void tcp_sndbuf_expand(struct sock *sk)
+ per_mss = roundup_pow_of_two(per_mss) +
+ SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+- nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+- nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
++ if (tp->mpc) {
++ nr_segs = mptcp_check_snd_buf(tp);
++ } else {
++ nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
++ nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
++ }
+
+ /* Fast Recovery (RFC 5681 3.2) :
+ * Cubic needs 1.7 factor, rounded to 2 to include
+@@ -292,8 +280,16 @@ static void tcp_sndbuf_expand(struct sock *sk)
+ */
+ sndmem = 2 * nr_segs * per_mss;
+
+- if (sk->sk_sndbuf < sndmem)
++ /* MPTCP: after this, sndmem is the new contribution of the
++ * current subflow to the aggregated sndbuf */
++ if (sk->sk_sndbuf < sndmem) {
++ int old_sndbuf = sk->sk_sndbuf;
+ sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
++ /* MPTCP: ok, the subflow sndbuf has grown, reflect
++ * this in the aggregate buffer.*/
++ if (tp->mpc && old_sndbuf != sk->sk_sndbuf)
++ mptcp_update_sndbuf(tp->mpcb);
++ }
+ }
+
+ /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
+@@ -342,10 +338,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
+ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
+
+ /* Check #1 */
+- if (tp->rcv_ssthresh < tp->window_clamp &&
+- (int)tp->rcv_ssthresh < tcp_space(sk) &&
++ if (meta_tp->rcv_ssthresh < meta_tp->window_clamp &&
++ (int)meta_tp->rcv_ssthresh < tcp_space(sk) &&
+ !sk_under_memory_pressure(sk)) {
+ int incr;
+
+@@ -353,14 +351,14 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
+ * will fit to rcvbuf in future.
+ */
+ if (tcp_win_from_space(skb->truesize) <= skb->len)
+- incr = 2 * tp->advmss;
++ incr = 2 * meta_tp->advmss;
+ else
+- incr = __tcp_grow_window(sk, skb);
++ incr = __tcp_grow_window(meta_sk, skb);
+
+ if (incr) {
+ incr = max_t(int, incr, 2 * skb->len);
+- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
+- tp->window_clamp);
++ meta_tp->rcv_ssthresh = min(meta_tp->rcv_ssthresh + incr,
++ meta_tp->window_clamp);
+ inet_csk(sk)->icsk_ack.quick |= 1;
+ }
+ }
+@@ -543,7 +541,10 @@ void tcp_rcv_space_adjust(struct sock *sk)
+ int copied;
+
+ time = tcp_time_stamp - tp->rcvq_space.time;
+- if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
++ if (tp->mpc) {
++ if (mptcp_check_rtt(tp, time))
++ return;
++ } else if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
+ return;
+
+ /* Number of bytes copied to user in last RTT */
+@@ -768,7 +769,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
+ /* Calculate rto without backoff. This is the second half of Van Jacobson's
+ * routine referred to above.
+ */
+-static void tcp_set_rto(struct sock *sk)
++void tcp_set_rto(struct sock *sk)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+ /* Old crap is replaced with new one. 8)
+@@ -2914,7 +2915,7 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ return false;
+
+ tcp_rtt_estimator(sk, seq_rtt);
+- tcp_set_rto(sk);
++ tp->set_rto(sk);
+
+ /* RFC6298: only reset backoff on valid RTT measurement. */
+ inet_csk(sk)->icsk_backoff = 0;
+@@ -2998,7 +2999,7 @@ void tcp_resume_early_retransmit(struct sock *sk)
+ }
+
+ /* If we get here, the whole TSO packet has not been acked. */
+-static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
++u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 packets_acked;
+@@ -3092,6 +3093,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ */
+ if (!(scb->tcp_flags & TCPHDR_SYN)) {
+ flag |= FLAG_DATA_ACKED;
++ if (tp->mpc && mptcp_is_data_seq(skb))
++ flag |= MPTCP_FLAG_DATA_ACKED;
+ } else {
+ flag |= FLAG_SYN_ACKED;
+ tp->retrans_stamp = 0;
+@@ -3194,7 +3197,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
+ return flag;
+ }
+
+-static void tcp_ack_probe(struct sock *sk)
++void tcp_ack_probe(struct sock *sk)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -3241,9 +3244,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+ /* Check that window update is acceptable.
+ * The function assumes that snd_una<=ack<=snd_next.
+ */
+-static inline bool tcp_may_update_window(const struct tcp_sock *tp,
+- const u32 ack, const u32 ack_seq,
+- const u32 nwin)
++bool tcp_may_update_window(const struct tcp_sock *tp, const u32 ack,
++ const u32 ack_seq, const u32 nwin)
+ {
+ return after(ack, tp->snd_una) ||
+ after(ack_seq, tp->snd_wl1) ||
+@@ -3362,7 +3364,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
+ }
+
+ /* This routine deals with incoming acks, but not outgoing ones. */
+-static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
++static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -3455,6 +3457,16 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
+ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
+ acked -= tp->packets_out;
+
++ if (tp->mpc) {
++ if (mptcp_fallback_infinite(sk, flag)) {
++ pr_err("%s resetting flow\n", __func__);
++ mptcp_send_reset(sk);
++ goto invalid_ack;
++ }
++
++ mptcp_clean_rtx_infinite(skb, sk);
++ }
++
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, acked, prior_in_flight);
+@@ -3519,8 +3531,9 @@ old_ack:
+ * the fast version below fails.
+ */
+ void tcp_parse_options(const struct sk_buff *skb,
+- struct tcp_options_received *opt_rx, int estab,
+- struct tcp_fastopen_cookie *foc)
++ struct tcp_options_received *opt_rx,
++ struct mptcp_options_received *mopt,
++ int estab, struct tcp_fastopen_cookie *foc)
+ {
+ const unsigned char *ptr;
+ const struct tcphdr *th = tcp_hdr(skb);
+@@ -3603,6 +3616,10 @@ void tcp_parse_options(const struct sk_buff *skb,
+ */
+ break;
+ #endif
++ case TCPOPT_MPTCP:
++ mptcp_parse_options(ptr - 2, opsize, opt_rx,
++ mopt, skb);
++ break;
+ case TCPOPT_EXP:
+ /* Fast Open option shares code 254 using a
+ * 16 bits magic number. It's valid only in
+@@ -3664,8 +3681,8 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
+ if (tcp_parse_aligned_timestamp(tp, th))
+ return true;
+ }
+-
+- tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
++ tcp_parse_options(skb, &tp->rx_opt, tp->mpc ? &tp->mptcp->rx_opt : NULL,
++ 1, NULL);
+ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+ tp->rx_opt.rcv_tsecr -= tp->tsoffset;
+
+@@ -3838,6 +3855,8 @@ static void tcp_fin(struct sock *sk)
+ dst = __sk_dst_get(sk);
+ if (!dst || !dst_metric(dst, RTAX_QUICKACK))
+ inet_csk(sk)->icsk_ack.pingpong = 1;
++ if (tp->mpc)
++ mptcp_sub_close_passive(sk);
+ break;
+
+ case TCP_CLOSE_WAIT:
+@@ -3859,6 +3878,13 @@ static void tcp_fin(struct sock *sk)
+ tcp_set_state(sk, TCP_CLOSING);
+ break;
+ case TCP_FIN_WAIT2:
++ if (tp->mpc) {
++ /* The socket will get closed by mptcp_data_ready.
++ * We first have to process all data-sequences.
++ */
++ tp->close_it = 1;
++ break;
++ }
+ /* Received a FIN -- send ACK and enter TIME_WAIT. */
+ tcp_send_ack(sk);
+ tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+@@ -3883,6 +3909,10 @@ static void tcp_fin(struct sock *sk)
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ sk->sk_state_change(sk);
+
++ /* Don't wake up MPTCP-subflows */
++ if (tp->mpc)
++ return;
++
+ /* Do not send POLL_HUP for half duplex close. */
+ if (sk->sk_shutdown == SHUTDOWN_MASK ||
+ sk->sk_state == TCP_CLOSE)
+@@ -4080,7 +4110,11 @@ static void tcp_ofo_queue(struct sock *sk)
+ tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
+ }
+
+- if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
++ /* In case of MPTCP, the segment may be empty if it's a
++ * non-data DATA_FIN. (see beginning of tcp_data_queue)
++ */
++ if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt) &&
++ !(tp->mpc && TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)) {
+ SOCK_DEBUG(sk, "ofo packet was already received\n");
+ __skb_unlink(skb, &tp->out_of_order_queue);
+ __kfree_skb(skb);
+@@ -4104,6 +4138,9 @@ static int tcp_prune_queue(struct sock *sk);
+ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+ unsigned int size)
+ {
++ if (tcp_sk(sk)->mpc)
++ sk = mptcp_meta_sk(sk);
++
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+ !sk_rmem_schedule(sk, skb, size)) {
+
+@@ -4134,15 +4171,16 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+ * Better try to coalesce them right now to avoid future collapses.
+ * Returns true if caller should free @from instead of queueing it
+ */
+-static bool tcp_try_coalesce(struct sock *sk,
+- struct sk_buff *to,
+- struct sk_buff *from,
+- bool *fragstolen)
++bool tcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from,
++ bool *fragstolen)
+ {
+ int delta;
+
+ *fragstolen = false;
+
++ if (tcp_sk(sk)->mpc && !is_meta_sk(sk))
++ return false;
++
+ if (tcp_hdr(from)->fin)
+ return false;
+
+@@ -4232,7 +4270,9 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+
+ /* Do skb overlap to previous one? */
+ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+- if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
++ /* MPTCP allows non-data data-fin to be in the ofo-queue */
++ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq) &&
++ !(tp->mpc && end_seq == seq)) {
+ /* All the bits are present. Drop. */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+ __kfree_skb(skb);
+@@ -4270,6 +4310,9 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ end_seq);
+ break;
+ }
++ /* MPTCP allows non-data data-fin to be in the ofo-queue */
++ if (tp->mpc && TCP_SKB_CB(skb1)->seq == TCP_SKB_CB(skb1)->end_seq)
++ continue;
+ __skb_unlink(skb1, &tp->out_of_order_queue);
+ tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
+ TCP_SKB_CB(skb1)->end_seq);
+@@ -4287,8 +4330,8 @@ end:
+ }
+ }
+
+-static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
+- bool *fragstolen)
++int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
++ bool *fragstolen)
+ {
+ int eaten;
+ struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
+@@ -4350,7 +4393,10 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+ int eaten = -1;
+ bool fragstolen = false;
+
+- if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
++ /* If no data is present, but a data_fin is in the options, we still
++ * have to call mptcp_queue_skb later on. */
++ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq &&
++ !(tp->mpc && mptcp_is_data_fin(skb)))
+ goto drop;
+
+ skb_dst_drop(skb);
+@@ -4396,7 +4442,7 @@ queue_and_out:
+ eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
+ }
+ tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+- if (skb->len)
++ if (skb->len || mptcp_is_data_fin(skb))
+ tcp_event_data_recv(sk, skb);
+ if (th->fin)
+ tcp_fin(sk);
+@@ -4418,7 +4464,11 @@ queue_and_out:
+
+ if (eaten > 0)
+ kfree_skb_partial(skb, fragstolen);
+- if (!sock_flag(sk, SOCK_DEAD))
++ if (!sock_flag(sk, SOCK_DEAD) || tp->mpc)
++ /* MPTCP: we always have to call data_ready, because
++ * we may be about to receive a data-fin, which still
++ * must get queued.
++ */
+ sk->sk_data_ready(sk, 0);
+ return;
+ }
+@@ -4470,6 +4520,8 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
+ next = skb_queue_next(list, skb);
+
+ __skb_unlink(skb, list);
++ if (tcp_sk(sk)->mpc)
++ mptcp_remove_shortcuts(tcp_sk(sk)->mpcb, skb);
+ __kfree_skb(skb);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+
+@@ -4642,6 +4694,18 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool res = false;
+
++ if (is_meta_sk(sk)) {
++ if (!skb_queue_empty(&tp->out_of_order_queue)) {
++ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
++ mptcp_purge_ofo_queue(tp);
++
++ /* No sack at the mptcp-level */
++ sk_mem_reclaim(sk);
++ res = true;
++ }
++ return res;
++ }
++
+ if (!skb_queue_empty(&tp->out_of_order_queue)) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+ __skb_queue_purge(&tp->out_of_order_queue);
+@@ -4731,7 +4795,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
+
+-static bool tcp_should_expand_sndbuf(const struct sock *sk)
++bool tcp_should_expand_sndbuf(const struct sock *sk)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+@@ -4766,7 +4830,7 @@ static void tcp_new_space(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+- if (tcp_should_expand_sndbuf(sk)) {
++ if (tp->should_expand_sndbuf(sk)) {
+ tcp_sndbuf_expand(sk);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
+@@ -4778,8 +4842,9 @@ static void tcp_check_space(struct sock *sk)
+ {
+ if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
+ sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
+- if (sk->sk_socket &&
+- test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
++ if (tcp_sk(sk)->mpc ||
++ (sk->sk_socket &&
++ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)))
+ tcp_new_space(sk);
+ }
+ }
+@@ -4802,7 +4867,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
+ /* ... and right edge of window advances far enough.
+ * (tcp_recvmsg() will send ACK otherwise). Or...
+ */
+- __tcp_select_window(sk) >= tp->rcv_wnd) ||
++ tp->__select_window(sk) >= tp->rcv_wnd) ||
+ /* We ACK each frame or... */
+ tcp_in_quickack_mode(sk) ||
+ /* We have out of order data. */
+@@ -4904,6 +4969,10 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
++ /* MPTCP urgent data is not yet supported */
++ if (tp->mpc)
++ return;
++
+ /* Check if we get a new urgent pointer - normally not. */
+ if (th->urg)
+ tcp_check_urg(sk, th);
+@@ -4971,8 +5040,7 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
+ }
+
+ #ifdef CONFIG_NET_DMA
+-static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
+- int hlen)
++bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ int chunk = skb->len - hlen;
+@@ -5081,9 +5149,15 @@ syn_challenge:
+ goto discard;
+ }
+
++ /* If valid: post process the received MPTCP options. */
++ if (tp->mpc && mptcp_handle_options(sk, th, skb))
++ goto discard;
++
+ return true;
+
+ discard:
++ if (tp->mpc)
++ mptcp_reset_mopt(tp);
+ __kfree_skb(skb);
+ return false;
+ }
+@@ -5135,6 +5209,10 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+
+ tp->rx_opt.saw_tstamp = 0;
+
++ /* MPTCP: force slowpath. */
++ if (tp->mpc)
++ goto slow_path;
++
+ /* pred_flags is 0xS?10 << 16 + snd_wnd
+ * if header_prediction is to be made
+ * 'S' will always be tp->tcp_header_len >> 2
+@@ -5349,7 +5427,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+ */
+ tp->lsndtime = tcp_time_stamp;
+
+- tcp_init_buffer_space(sk);
++ tp->init_buffer_space(sk);
+
+ if (sock_flag(sk, SOCK_KEEPOPEN))
+ inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
+@@ -5379,7 +5457,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ /* Get original SYNACK MSS value if user MSS sets mss_clamp */
+ tcp_clear_options(&opt);
+ opt.user_mss = opt.mss_clamp = 0;
+- tcp_parse_options(synack, &opt, 0, NULL);
++ tcp_parse_options(synack, &opt, NULL, 0, NULL);
+ mss = opt.mss_clamp;
+ }
+
+@@ -5414,8 +5492,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_fastopen_cookie foc = { .len = -1 };
+ int saved_clamp = tp->rx_opt.mss_clamp;
++ struct mptcp_options_received mopt;
++ mptcp_init_mp_opt(&mopt);
+
+- tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
++ tcp_parse_options(skb, &tp->rx_opt,
++ tp->mpc ? &tp->mptcp->rx_opt : &mopt, 0, &foc);
+ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+ tp->rx_opt.rcv_tsecr -= tp->tsoffset;
+
+@@ -5462,6 +5543,21 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ if (!th->syn)
+ goto discard_and_undo;
+
++ if (tp->request_mptcp || tp->mpc) {
++ int ret;
++ ret = mptcp_rcv_synsent_state_process(sk, &sk,
++ skb, &mopt);
++
++ /* May have changed if we support MPTCP */
++ tp = tcp_sk(sk);
++ icsk = inet_csk(sk);
++
++ if (ret == 1)
++ goto reset_and_undo;
++ if (ret == 2)
++ goto discard;
++ }
++
+ /* rfc793:
+ * "If the SYN bit is on ...
+ * are acceptable then ...
+@@ -5474,6 +5570,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+ tcp_ack(sk, skb, FLAG_SLOWPATH);
+
++ if (tp->mpc && !is_master_tp(tp)) {
++ /* Timer for repeating the ACK until an answer
++ * arrives. Used only when establishing an additional
++ * subflow inside of an MPTCP connection.
++ */
++ sk_reset_timer(sk, &tp->mptcp->mptcp_ack_timer,
++ jiffies + icsk->icsk_rto);
++ }
++
+ /* Ok.. it's good. Set up sequence numbers and
+ * move to established.
+ */
+@@ -5500,6 +5605,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ tp->tcp_header_len = sizeof(struct tcphdr);
+ }
+
++ if (tp->mpc) {
++ tp->tcp_header_len += MPTCP_SUB_LEN_DSM_ALIGN;
++ tp->advmss -= MPTCP_SUB_LEN_DSM_ALIGN;
++ }
++
+ if (tcp_is_sack(tp) && sysctl_tcp_fack)
+ tcp_enable_fack(tp);
+
+@@ -5520,7 +5630,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ tcp_rcv_fastopen_synack(sk, skb, &foc))
+ return -1;
+
+- if (sk->sk_write_pending ||
++ /* With MPTCP we cannot send data on the third ack due to the
++ * lack of option-space */
++ if ((sk->sk_write_pending && !tp->mpc) ||
+ icsk->icsk_accept_queue.rskq_defer_accept ||
+ icsk->icsk_ack.pingpong) {
+ /* Save one ACK. Data will be ready after
+@@ -5562,6 +5674,7 @@ discard:
+ tcp_paws_reject(&tp->rx_opt, 0))
+ goto discard_and_undo;
+
++ /* TODO - check this here for MPTCP */
+ if (th->syn) {
+ /* We see SYN without ACK. It is attempt of
+ * simultaneous connect with crossed SYNs.
+@@ -5578,6 +5691,11 @@ discard:
+ tp->tcp_header_len = sizeof(struct tcphdr);
+ }
+
++ if (tp->mpc) {
++ tp->tcp_header_len += MPTCP_SUB_LEN_DSM_ALIGN;
++ tp->advmss -= MPTCP_SUB_LEN_DSM_ALIGN;
++ }
++
+ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+ tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+
+@@ -5636,6 +5754,7 @@ reset_and_undo:
+
+ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
++ __releases(&sk->sk_lock.slock)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -5687,6 +5806,10 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ case TCP_SYN_SENT:
+ queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
++ if (is_meta_sk(sk)) {
++ sk = tcp_sk(sk)->mpcb->master_sk;
++ tp = tcp_sk(sk);
++ }
+ if (queued >= 0)
+ return queued;
+
+@@ -5694,6 +5817,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ tcp_urg(sk, skb, th);
+ __kfree_skb(skb);
+ tcp_data_snd_check(sk);
++ if (tp->mpc && is_master_tp(tp))
++ bh_unlock_sock(sk);
+ return 0;
+ }
+
+@@ -5736,7 +5861,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ tcp_mtup_init(sk);
+ tp->copied_seq = tp->rcv_nxt;
+- tcp_init_buffer_space(sk);
++ tp->init_buffer_space(sk);
+ }
+ smp_mb();
+ tcp_set_state(sk, TCP_ESTABLISHED);
+@@ -5756,6 +5881,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ if (tp->rx_opt.tstamp_ok)
+ tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
++ if (tp->mpc)
++ tp->advmss -= MPTCP_SUB_LEN_DSM_ALIGN;
+
+ if (req) {
+ /* Re-arm the timer because data may have been sent out.
+@@ -5777,6 +5904,12 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+
+ tcp_initialize_rcv_mss(sk);
+ tcp_fast_path_on(tp);
++ /* Send an ACK when establishing a new
++ * MPTCP subflow, i.e. using an MP_JOIN
++ * subtype.
++ */
++ if (tp->mpc && !is_master_tp(tp))
++ tcp_send_ack(sk);
+ break;
+
+ case TCP_FIN_WAIT1: {
+@@ -5828,7 +5961,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ tmo = tcp_fin_time(sk);
+ if (tmo > TCP_TIMEWAIT_LEN) {
+ inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+- } else if (th->fin || sock_owned_by_user(sk)) {
++ } else if (th->fin || mptcp_is_data_fin(skb) ||
++ sock_owned_by_user(sk)) {
+ /* Bad case. We could lose such FIN otherwise.
+ * It is not a big problem, but it looks confusing
+ * and not so rare event. We still can lose it now,
+@@ -5857,6 +5991,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ goto discard;
+ }
+ break;
++ case TCP_CLOSE:
++ if (tp->mp_killed)
++ goto discard;
+ }
+
+ /* step 6: check the URG bit */
+@@ -5877,7 +6014,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ */
+ if (sk->sk_shutdown & RCV_SHUTDOWN) {
+ if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+- after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
++ after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt) &&
++ !tp->mpc) {
++ /* In case of mptcp, the reset is handled by
++ * mptcp_rcv_state_process
++ */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+ tcp_reset(sk);
+ return 1;
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 1e4eac7..5891fa6 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -67,6 +67,8 @@
+ #include <net/icmp.h>
+ #include <net/inet_hashtables.h>
+ #include <net/tcp.h>
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
+ #include <net/transp_v6.h>
+ #include <net/ipv6.h>
+ #include <net/inet_common.h>
+@@ -99,7 +101,7 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
+ struct inet_hashinfo tcp_hashinfo;
+ EXPORT_SYMBOL(tcp_hashinfo);
+
+-static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
++__u32 tcp_v4_init_sequence(const struct sk_buff *skb)
+ {
+ return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
+ ip_hdr(skb)->saddr,
+@@ -334,7 +336,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ struct inet_sock *inet;
+ const int type = icmp_hdr(icmp_skb)->type;
+ const int code = icmp_hdr(icmp_skb)->code;
+- struct sock *sk;
++ struct sock *sk, *meta_sk;
+ struct sk_buff *skb;
+ struct request_sock *req;
+ __u32 seq;
+@@ -358,13 +360,19 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ return;
+ }
+
+- bh_lock_sock(sk);
++ tp = tcp_sk(sk);
++ if (tp->mpc)
++ meta_sk = mptcp_meta_sk(sk);
++ else
++ meta_sk = sk;
++
++ bh_lock_sock(meta_sk);
+ /* If too many ICMPs get dropped on busy
+ * servers this needs to be solved differently.
+ * We do take care of PMTU discovery (RFC1191) special case :
+ * we can receive locally generated ICMP messages while socket is held.
+ */
+- if (sock_owned_by_user(sk)) {
++ if (sock_owned_by_user(meta_sk)) {
+ if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+ NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+ }
+@@ -377,7 +385,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ }
+
+ icsk = inet_csk(sk);
+- tp = tcp_sk(sk);
+ req = tp->fastopen_rsk;
+ seq = ntohl(th->seq);
+ if (sk->sk_state != TCP_LISTEN &&
+@@ -411,11 +418,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ goto out;
+
+ tp->mtu_info = info;
+- if (!sock_owned_by_user(sk)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ tcp_v4_mtu_reduced(sk);
+ } else {
+ if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+ sock_hold(sk);
++ if (tp->mpc)
++ mptcp_tsq_flags(sk);
+ }
+ goto out;
+ }
+@@ -431,7 +440,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+
+ /* XXX (TFO) - revisit the following logic for TFO */
+
+- if (sock_owned_by_user(sk))
++ if (sock_owned_by_user(meta_sk))
+ break;
+
+ icsk->icsk_backoff--;
+@@ -473,7 +482,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ switch (sk->sk_state) {
+ struct request_sock *req, **prev;
+ case TCP_LISTEN:
+- if (sock_owned_by_user(sk))
++ if (sock_owned_by_user(meta_sk))
+ goto out;
+
+ req = inet_csk_search_req(sk, &prev, th->dest,
+@@ -506,7 +515,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ It can f.e. if SYNs crossed,
+ or Fast Open.
+ */
+- if (!sock_owned_by_user(sk)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ sk->sk_err = err;
+
+ sk->sk_error_report(sk);
+@@ -535,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ */
+
+ inet = inet_sk(sk);
+- if (!sock_owned_by_user(sk) && inet->recverr) {
++ if (!sock_owned_by_user(meta_sk) && inet->recverr) {
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+ } else { /* Only an error on timeout */
+@@ -543,7 +552,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ }
+
+ out:
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+ sock_put(sk);
+ }
+
+@@ -585,7 +594,7 @@ EXPORT_SYMBOL(tcp_v4_send_check);
+ * Exception: precedence violation. We do not implement it in any case.
+ */
+
+-static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
++void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+ {
+ const struct tcphdr *th = tcp_hdr(skb);
+ struct {
+@@ -709,10 +718,10 @@ release_sk1:
+ outside socket context is ugly, certainly. What can I do?
+ */
+
+-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 data_ack,
+ u32 win, u32 tsval, u32 tsecr, int oif,
+ struct tcp_md5sig_key *key,
+- int reply_flags, u8 tos)
++ int reply_flags, u8 tos, int mptcp)
+ {
+ const struct tcphdr *th = tcp_hdr(skb);
+ struct {
+@@ -721,6 +730,10 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ #ifdef CONFIG_TCP_MD5SIG
+ + (TCPOLEN_MD5SIG_ALIGNED >> 2)
+ #endif
++#ifdef CONFIG_MPTCP
++ + ((MPTCP_SUB_LEN_DSS >> 2) +
++ (MPTCP_SUB_LEN_ACK >> 2))
++#endif
+ ];
+ } rep;
+ struct ip_reply_arg arg;
+@@ -765,6 +778,21 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ ip_hdr(skb)->daddr, &rep.th);
+ }
+ #endif
++#ifdef CONFIG_MPTCP
++ if (mptcp) {
++ int offset = (tsecr) ? 3 : 0;
++ /* Construction of 32-bit data_ack */
++ rep.opt[offset++] = htonl((TCPOPT_MPTCP << 24) |
++ ((MPTCP_SUB_LEN_DSS + MPTCP_SUB_LEN_ACK) << 16) |
++ (0x20 << 8) |
++ (0x01));
++ rep.opt[offset] = htonl(data_ack);
++
++ arg.iov[0].iov_len += MPTCP_SUB_LEN_DSS + MPTCP_SUB_LEN_ACK;
++ rep.th.doff = arg.iov[0].iov_len / 4;
++ }
++#endif /* CONFIG_MPTCP */
++
+ arg.flags = reply_flags;
+ arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
+ ip_hdr(skb)->saddr, /* XXX */
+@@ -783,36 +811,44 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ {
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+ struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
++ u32 data_ack = 0;
++ int mptcp = 0;
++
++ if (tcptw->mptcp_tw && tcptw->mptcp_tw->meta_tw) {
++ data_ack = (u32)tcptw->mptcp_tw->rcv_nxt;
++ mptcp = 1;
++ }
+
+ tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++ data_ack,
+ tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ tcp_time_stamp + tcptw->tw_ts_offset,
+ tcptw->tw_ts_recent,
+ tw->tw_bound_dev_if,
+ tcp_twsk_md5_key(tcptw),
+ tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+- tw->tw_tos
++ tw->tw_tos, mptcp
+ );
+
+ inet_twsk_put(tw);
+ }
+
+-static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+- struct request_sock *req)
++void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req)
+ {
+ /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ */
+ tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+ tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+- tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
++ tcp_rsk(req)->rcv_nxt, 0, req->rcv_wnd,
+ tcp_time_stamp,
+ req->ts_recent,
+ 0,
+ tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
+ AF_INET),
+ inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+- ip_hdr(skb)->tos);
++ ip_hdr(skb)->tos, 0);
+ }
+
+ /*
+@@ -820,9 +856,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ * This still operates on a request_sock only, not on a big
+ * socket.
+ */
+-static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+- struct request_sock *req,
+- u16 queue_mapping)
++int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
++ struct request_sock *req,
++ u16 queue_mapping)
+ {
+ const struct inet_request_sock *ireq = inet_rsk(req);
+ struct flowi4 fl4;
+@@ -850,7 +886,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+ return err;
+ }
+
+-static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
++int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
+ {
+ int res = tcp_v4_send_synack(sk, NULL, req, 0);
+
+@@ -862,7 +898,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
+ /*
+ * IPv4 request_sock destructor.
+ */
+-static void tcp_v4_reqsk_destructor(struct request_sock *req)
++void tcp_v4_reqsk_destructor(struct request_sock *req)
+ {
+ kfree(inet_rsk(req)->opt);
+ }
+@@ -902,7 +938,7 @@ EXPORT_SYMBOL(tcp_syn_flood_action);
+ /*
+ * Save and compile IPv4 options into the request_sock if needed.
+ */
+-static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
++struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
+ {
+ const struct ip_options *opt = &(IPCB(skb)->opt);
+ struct ip_options_rcu *dopt = NULL;
+@@ -1254,7 +1290,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
+ };
+
+ #ifdef CONFIG_TCP_MD5SIG
+-static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
++const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+ .md5_lookup = tcp_v4_reqsk_md5_lookup,
+ .calc_md5_hash = tcp_v4_md5_hash_skb,
+ };
+@@ -1412,7 +1448,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
+ tcp_init_congestion_control(child);
+ tcp_mtup_init(child);
+ tcp_init_metrics(child);
+- tcp_init_buffer_space(child);
++ tp->init_buffer_space(child);
+
+ /* Queue the data carried in the SYN packet. We need to first
+ * bump skb's refcnt because the caller will attempt to free it.
+@@ -1444,6 +1480,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
+ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcp_options_received tmp_opt;
++ struct mptcp_options_received mopt;
+ struct request_sock *req;
+ struct inet_request_sock *ireq;
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -1458,6 +1495,22 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ struct sk_buff *skb_synack;
+ int do_fastopen;
+
++ tcp_clear_options(&tmp_opt);
++ tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
++ tmp_opt.user_mss = tp->rx_opt.user_mss;
++ mptcp_init_mp_opt(&mopt);
++ tcp_parse_options(skb, &tmp_opt, &mopt, 0, want_cookie ? NULL : &foc);
++
++#ifdef CONFIG_MPTCP
++ /* MPTCP structures not initialized, so clear MPTCP fields */
++ if (mptcp_init_failed)
++ mptcp_init_mp_opt(&mopt);
++
++ if (mopt.is_mp_join)
++ return mptcp_do_join_short(skb, &mopt, &tmp_opt, sock_net(sk));
++ if (mopt.drop_me)
++ goto drop;
++#endif
+ /* Never answer to SYNs send to broadcast or multicast */
+ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+ goto drop;
+@@ -1483,7 +1536,22 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
+- req = inet_reqsk_alloc(&tcp_request_sock_ops);
++#ifdef CONFIG_MPTCP
++ if (sysctl_mptcp_enabled == MPTCP_APP && !tp->mptcp_enabled)
++ mopt.saw_mpc = 0;
++ if (mopt.saw_mpc && !want_cookie) {
++ req = inet_reqsk_alloc(&mptcp_request_sock_ops);
++
++ if (!req)
++ goto drop;
++
++ mptcp_rsk(req)->mpcb = NULL;
++ mptcp_rsk(req)->dss_csum = mopt.dss_csum;
++ mptcp_rsk(req)->collide_tk.pprev = NULL;
++ } else
++#endif
++ req = inet_reqsk_alloc(&tcp_request_sock_ops);
++
+ if (!req)
+ goto drop;
+
+@@ -1491,17 +1559,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
+ #endif
+
+- tcp_clear_options(&tmp_opt);
+- tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
+- tmp_opt.user_mss = tp->rx_opt.user_mss;
+- tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+-
+ if (want_cookie && !tmp_opt.saw_tstamp)
+ tcp_clear_options(&tmp_opt);
+
+ tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+ tcp_openreq_init(req, &tmp_opt, skb);
+
++ if (mopt.saw_mpc && !want_cookie)
++ mptcp_reqsk_new_mptcp(req, &tmp_opt, &mopt, skb);
++
+ ireq = inet_rsk(req);
+ ireq->ir_loc_addr = daddr;
+ ireq->ir_rmt_addr = saddr;
+@@ -1713,7 +1779,7 @@ put_and_exit:
+ }
+ EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
+
+-static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
++struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcphdr *th = tcp_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+@@ -1730,8 +1796,15 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+
+ if (nsk) {
+ if (nsk->sk_state != TCP_TIME_WAIT) {
++			/* Don't lock the meta-sk again. It has been locked
++ * before mptcp_v4_do_rcv.
++ */
++ if (tcp_sk(nsk)->mpc && !is_meta_sk(sk))
++ bh_lock_sock(mptcp_meta_sk(nsk));
+ bh_lock_sock(nsk);
++
+ return nsk;
++
+ }
+ inet_twsk_put(inet_twsk(nsk));
+ return NULL;
+@@ -1788,6 +1861,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
+ goto discard;
+ #endif
+
++ if (is_meta_sk(sk))
++ return mptcp_v4_do_rcv(sk, skb);
++
+ if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ struct dst_entry *dst = sk->sk_rx_dst;
+
+@@ -1919,7 +1995,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+ wake_up_interruptible_sync_poll(sk_sleep(sk),
+ POLLIN | POLLRDNORM | POLLRDBAND);
+- if (!inet_csk_ack_scheduled(sk))
++ if (!inet_csk_ack_scheduled(sk) && !tp->mpc)
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ (3 * tcp_rto_min(sk)) / 4,
+ TCP_RTO_MAX);
+@@ -1936,7 +2012,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ {
+ const struct iphdr *iph;
+ const struct tcphdr *th;
+- struct sock *sk;
++ struct sock *sk, *meta_sk = NULL;
+ int ret;
+ struct net *net = dev_net(skb->dev);
+
+@@ -1969,18 +2045,42 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+ skb->len - th->doff * 4);
+ TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
++#ifdef CONFIG_MPTCP
++ TCP_SKB_CB(skb)->mptcp_flags = 0;
++ TCP_SKB_CB(skb)->dss_off = 0;
++#endif
+ TCP_SKB_CB(skb)->when = 0;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
+ TCP_SKB_CB(skb)->sacked = 0;
+
+ sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+- if (!sk)
+- goto no_tcp_socket;
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk && sk->sk_state == TCP_TIME_WAIT)
+ goto do_time_wait;
+
++#ifdef CONFIG_MPTCP
++ if (!sk && th->syn && !th->ack) {
++ int ret = mptcp_lookup_join(skb, NULL);
++
++ if (ret < 0) {
++ tcp_v4_send_reset(NULL, skb);
++ goto discard_it;
++ } else if (ret > 0) {
++ return 0;
++ }
++ }
++
++ /* Is there a pending request sock for this segment ? */
++ if ((!sk || sk->sk_state == TCP_LISTEN) && mptcp_check_req(skb, net)) {
++ if (sk)
++ sock_put(sk);
++ return 0;
++ }
++#endif
++ if (!sk)
++ goto no_tcp_socket;
++
+ if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto discard_and_relse;
+@@ -1996,11 +2096,21 @@ process:
+ sk_mark_napi_id(sk, skb);
+ skb->dev = NULL;
+
+- bh_lock_sock_nested(sk);
++ if (tcp_sk(sk)->mpc) {
++ meta_sk = mptcp_meta_sk(sk);
++
++ bh_lock_sock_nested(meta_sk);
++ if (sock_owned_by_user(meta_sk))
++ skb->sk = sk;
++ } else {
++ meta_sk = sk;
++ bh_lock_sock_nested(sk);
++ }
++
+ ret = 0;
+- if (!sock_owned_by_user(sk)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ #ifdef CONFIG_NET_DMA
+- struct tcp_sock *tp = tcp_sk(sk);
++ struct tcp_sock *tp = tcp_sk(meta_sk);
+ if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+ tp->ucopy.dma_chan = net_dma_find_channel();
+ if (tp->ucopy.dma_chan)
+@@ -2008,16 +2118,16 @@ process:
+ else
+ #endif
+ {
+- if (!tcp_prequeue(sk, skb))
++ if (!tcp_prequeue(meta_sk, skb))
+ ret = tcp_v4_do_rcv(sk, skb);
+ }
+- } else if (unlikely(sk_add_backlog(sk, skb,
+- sk->sk_rcvbuf + sk->sk_sndbuf))) {
+- bh_unlock_sock(sk);
++ } else if (unlikely(sk_add_backlog(meta_sk, skb,
++ meta_sk->sk_rcvbuf + meta_sk->sk_sndbuf))) {
++ bh_unlock_sock(meta_sk);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+ goto discard_and_relse;
+ }
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+
+ sock_put(sk);
+
+@@ -2072,6 +2182,18 @@ do_time_wait:
+ sk = sk2;
+ goto process;
+ }
++#ifdef CONFIG_MPTCP
++ if (th->syn && !th->ack) {
++ int ret = mptcp_lookup_join(skb, inet_twsk(sk));
++
++ if (ret < 0) {
++ tcp_v4_send_reset(NULL, skb);
++ goto discard_it;
++ } else if (ret > 0) {
++ return 0;
++ }
++ }
++#endif
+ /* Fall through to ACK */
+ }
+ case TCP_TW_ACK:
+@@ -2154,6 +2276,11 @@ void tcp_v4_destroy_sock(struct sock *sk)
+
+ tcp_cleanup_congestion_control(sk);
+
++ if (tp->mpc)
++ mptcp_destroy_sock(sk);
++ if (tp->inside_tk_table)
++ mptcp_hash_remove(tp);
++
+ /* Cleanup up the write buffer. */
+ tcp_write_queue_purge(sk);
+
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 7a436c5..72f9b8e 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -18,11 +18,13 @@
+ * Jorge Cwik, <jorge@laser.satlink.net>
+ */
+
++#include <linux/kconfig.h>
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/sysctl.h>
+ #include <linux/workqueue.h>
++#include <net/mptcp.h>
+ #include <net/tcp.h>
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
+@@ -95,10 +97,13 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
+ struct tcp_options_received tmp_opt;
+ struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
+ bool paws_reject = false;
++ struct mptcp_options_received mopt;
+
+ tmp_opt.saw_tstamp = 0;
+ if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
+- tcp_parse_options(skb, &tmp_opt, 0, NULL);
++ mptcp_init_mp_opt(&mopt);
++
++ tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL);
+
+ if (tmp_opt.saw_tstamp) {
+ tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
+@@ -106,6 +111,11 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
+ tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
+ }
++
++ if (unlikely(mopt.mp_fclose) && tcptw->mptcp_tw) {
++ if (mopt.mptcp_key == tcptw->mptcp_tw->loc_key)
++ goto kill_with_rst;
++ }
+ }
+
+ if (tw->tw_substate == TCP_FIN_WAIT2) {
+@@ -128,6 +138,16 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
+ if (!th->ack ||
+ !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
+ TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
++ /* If mptcp_is_data_fin() returns true, we are sure that
++ * mopt has been initialized - otherwise it would not
++ * be a DATA_FIN.
++ */
++ if (tcptw->mptcp_tw && tcptw->mptcp_tw->meta_tw &&
++ mptcp_is_data_fin(skb) &&
++ TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
++ mopt.data_seq + 1 == (u32)tcptw->mptcp_tw->rcv_nxt)
++ return TCP_TW_ACK;
++
+ inet_twsk_put(tw);
+ return TCP_TW_SUCCESS;
+ }
+@@ -270,6 +290,11 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
+ const struct tcp_sock *tp = tcp_sk(sk);
+ bool recycle_ok = false;
+
++ if (is_meta_sk(sk)) {
++ mptcp_update_tw_socks(tp, state);
++ goto tcp_done;
++ }
++
+ if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
+ recycle_ok = tcp_remember_stamp(sk);
+
+@@ -290,6 +315,15 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
+ tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+ tcptw->tw_ts_offset = tp->tsoffset;
+
++ if (tp->mpc) {
++ if (mptcp_time_wait(sk, tcptw)) {
++ inet_twsk_free(tw);
++ goto exit;
++ }
++ } else {
++ tcptw->mptcp_tw = NULL;
++ }
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (tw->tw_family == PF_INET6) {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+@@ -347,15 +381,19 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
+ }
+
++exit:
+ tcp_update_metrics(sk);
++tcp_done:
+ tcp_done(sk);
+ }
+
+ void tcp_twsk_destructor(struct sock *sk)
+ {
+-#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+
++ if (twsk->mptcp_tw)
++ mptcp_twsk_destructor(twsk);
++#ifdef CONFIG_TCP_MD5SIG
+ if (twsk->tw_md5_key)
+ kfree_rcu(twsk->tw_md5_key, rcu);
+ #endif
+@@ -392,6 +430,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
+
+ newtp->snd_sml = newtp->snd_una =
+ newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
++#ifdef CONFIG_MPTCP
++ memset(&newtp->rcvq_space, 0, sizeof(newtp->rcvq_space));
++#endif
+
+ tcp_prequeue_init(newtp);
+ INIT_LIST_HEAD(&newtp->tsq_node);
+@@ -436,7 +477,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
+
+ newtp->urg_data = 0;
+
+- if (sock_flag(newsk, SOCK_KEEPOPEN))
++ /* MPTCP: If we are creating a subflow, KEEPOPEN might have been
++ * set on the meta. But, keepalive is entirely handled at the
++ * meta-socket, so let's keep it there.
++ */
++ if (sock_flag(newsk, SOCK_KEEPOPEN) && is_meta_sk(sk))
+ inet_csk_reset_keepalive_timer(newsk,
+ keepalive_time_when(newtp));
+
+@@ -468,6 +513,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
+ newtp->rx_opt.ts_recent_stamp = 0;
+ newtp->tcp_header_len = sizeof(struct tcphdr);
+ }
++ if (treq->saw_mpc)
++ newtp->tcp_header_len += MPTCP_SUB_LEN_DSM_ALIGN;
+ newtp->tsoffset = 0;
+ #ifdef CONFIG_TCP_MD5SIG
+ newtp->md5sig_info = NULL; /*XXX*/
+@@ -504,16 +551,20 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ bool fastopen)
+ {
+ struct tcp_options_received tmp_opt;
++ struct mptcp_options_received mopt;
+ struct sock *child;
+ const struct tcphdr *th = tcp_hdr(skb);
+ __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
+ bool paws_reject = false;
+
+- BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
++ BUG_ON(!tcp_sk(sk)->mpc && fastopen == (sk->sk_state == TCP_LISTEN));
+
+ tmp_opt.saw_tstamp = 0;
++
++ mptcp_init_mp_opt(&mopt);
++
+ if (th->doff > (sizeof(struct tcphdr)>>2)) {
+- tcp_parse_options(skb, &tmp_opt, 0, NULL);
++ tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL);
+
+ if (tmp_opt.saw_tstamp) {
+ tmp_opt.ts_recent = req->ts_recent;
+@@ -552,7 +603,14 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ *
+ * Reset timer after retransmitting SYNACK, similar to
+ * the idea of fast retransmit in recovery.
++ *
++ * Fall back to TCP if MP_CAPABLE is not set.
+ */
++
++ if (tcp_rsk(req)->saw_mpc && !mopt.saw_mpc)
++ tcp_rsk(req)->saw_mpc = false;
++
++
+ if (!inet_rtx_syn_ack(sk, req))
+ req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
+ TCP_RTO_MAX) + jiffies;
+@@ -674,7 +732,20 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+
+ /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+ if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+- TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
++ TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1 &&
++ /* TODO MPTCP:
++ * We do this here, because otherwise options sent in the third ack,
++ * or duplicate fourth ack will get lost. Options like MP_PRIO, ADD_ADDR,...
++ *
++ * We could store them in request_sock, but this would mean that we
++ * have to put tcp_options_received and mptcp_options_received in there,
++ * increasing considerably the size of the request-sock.
++ *
++ * As soon as we have reworked the request-sock MPTCP-fields and
++ * created a mptcp_request_sock structure, we can handle options
++	 * correctly there without increasing request_sock.
++ */
++ !tcp_rsk(req)->saw_mpc) {
+ inet_rsk(req)->acked = 1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
+ return NULL;
+@@ -686,10 +757,29 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ * ESTABLISHED STATE. If it will be dropped after
+ * socket is created, wait for troubles.
+ */
+- child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
++#ifdef CONFIG_MPTCP
++ if (tcp_sk(sk)->mpc)
++ /* MPTCP: We call the mptcp-specific syn_recv_sock */
++ child = tcp_sk(sk)->mpcb->syn_recv_sock(sk, skb, req, NULL);
++ else
++#endif
++ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
++ req, NULL);
++
+ if (child == NULL)
+ goto listen_overflow;
+
++ if (!is_meta_sk(sk)) {
++ int ret = mptcp_check_req_master(sk, child, req, prev, &mopt);
++ if (ret < 0)
++ goto listen_overflow;
++
++ /* MPTCP-supported */
++ if (!ret)
++ return tcp_sk(child)->mpcb->master_sk;
++ } else {
++ return mptcp_check_req_child(sk, child, req, prev, &mopt);
++ }
+ inet_csk_reqsk_queue_unlink(sk, req, prev);
+ inet_csk_reqsk_queue_removed(sk, req);
+
+@@ -739,8 +829,9 @@ int tcp_child_process(struct sock *parent, struct sock *child,
+ {
+ int ret = 0;
+ int state = child->sk_state;
++ struct sock *meta_sk = tcp_sk(child)->mpc ? mptcp_meta_sk(child) : child;
+
+- if (!sock_owned_by_user(child)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
+ skb->len);
+ /* Wakeup parent, send SIGIO */
+@@ -751,10 +842,14 @@ int tcp_child_process(struct sock *parent, struct sock *child,
+ * in main socket hash table and lock on listening
+ * socket does not protect us more.
+ */
+- __sk_add_backlog(child, skb);
++ if (tcp_sk(child)->mpc)
++ skb->sk = child;
++ __sk_add_backlog(meta_sk, skb);
+ }
+
+- bh_unlock_sock(child);
++ if (tcp_sk(child)->mpc)
++ bh_unlock_sock(child);
++ bh_unlock_sock(meta_sk);
+ sock_put(child);
+ return ret;
+ }
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 17a11e6..6b45057 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -36,6 +36,12 @@
+
+ #define pr_fmt(fmt) "TCP: " fmt
+
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++#if IS_ENABLED(CONFIG_IPV6)
++#include <net/mptcp_v6.h>
++#endif
++#include <net/ipv6.h>
+ #include <net/tcp.h>
+
+ #include <linux/compiler.h>
+@@ -72,7 +78,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int push_one, gfp_t gfp);
+
+ /* Account for new data that has been sent to the network. */
+-static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
++void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -211,7 +217,7 @@ u32 tcp_default_init_rwnd(u32 mss)
+ void tcp_select_initial_window(int __space, __u32 mss,
+ __u32 *rcv_wnd, __u32 *window_clamp,
+ int wscale_ok, __u8 *rcv_wscale,
+- __u32 init_rcv_wnd)
++ __u32 init_rcv_wnd, const struct sock *sk)
+ {
+ unsigned int space = (__space < 0 ? 0 : __space);
+
+@@ -266,11 +272,15 @@ EXPORT_SYMBOL(tcp_select_initial_window);
+ * value can be stuffed directly into th->window for an outgoing
+ * frame.
+ */
+-static u16 tcp_select_window(struct sock *sk)
++u16 tcp_select_window(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+- u32 cur_win = tcp_receive_window(tp);
+- u32 new_win = __tcp_select_window(sk);
++ /* The window must never shrink at the meta-level. At the subflow we
++ * have to allow this. Otherwise we may announce a window too large
++ * for the current meta-level sk_rcvbuf.
++ */
++ u32 cur_win = tcp_receive_window(tp->mpc ? tcp_sk(mptcp_meta_sk(sk)) : tp);
++ u32 new_win = tp->__select_window(sk);
+
+ /* Never shrink the offered window */
+ if (new_win < cur_win) {
+@@ -283,6 +293,7 @@ static u16 tcp_select_window(struct sock *sk)
+ */
+ new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
+ }
++
+ tp->rcv_wnd = new_win;
+ tp->rcv_wup = tp->rcv_nxt;
+
+@@ -361,7 +372,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
+ /* Constructs common control bits of non-data skb. If SYN/FIN is present,
+ * auto increment end seqno.
+ */
+-static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
++void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
+ {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+@@ -381,7 +392,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
+ TCP_SKB_CB(skb)->end_seq = seq;
+ }
+
+-static inline bool tcp_urg_mode(const struct tcp_sock *tp)
++bool tcp_urg_mode(const struct tcp_sock *tp)
+ {
+ return tp->snd_una != tp->snd_up;
+ }
+@@ -391,17 +402,7 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
+ #define OPTION_MD5 (1 << 2)
+ #define OPTION_WSCALE (1 << 3)
+ #define OPTION_FAST_OPEN_COOKIE (1 << 8)
+-
+-struct tcp_out_options {
+- u16 options; /* bit field of OPTION_* */
+- u16 mss; /* 0 to disable */
+- u8 ws; /* window scale, 0 to disable */
+- u8 num_sack_blocks; /* number of SACK blocks to include */
+- u8 hash_size; /* bytes in hash_location */
+- __u8 *hash_location; /* temporary pointer, overloaded */
+- __u32 tsval, tsecr; /* need to include OPTION_TS */
+- struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
+-};
++/* Before adding here - take a look at OPTION_MPTCP in include/net/mptcp.h */
+
+ /* Write previously computed TCP options to the packet.
+ *
+@@ -417,7 +418,7 @@ struct tcp_out_options {
+ * (but it may well be that other scenarios fail similarly).
+ */
+ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
+- struct tcp_out_options *opts)
++ struct tcp_out_options *opts, struct sk_buff *skb)
+ {
+ u16 options = opts->options; /* mungable copy */
+
+@@ -500,6 +501,9 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
+ }
+ ptr += (foc->len + 3) >> 2;
+ }
++
++ if (unlikely(OPTION_MPTCP & opts->options))
++ mptcp_options_write(ptr, tp, opts, skb);
+ }
+
+ /* Compute TCP options for SYN packets. This is not the final
+@@ -551,6 +555,8 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+ if (unlikely(!(OPTION_TS & opts->options)))
+ remaining -= TCPOLEN_SACKPERM_ALIGNED;
+ }
++ if (tp->request_mptcp || tp->mpc)
++ mptcp_syn_options(sk, opts, &remaining);
+
+ if (fastopen && fastopen->cookie.len >= 0) {
+ u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
+@@ -624,6 +630,9 @@ static unsigned int tcp_synack_options(struct sock *sk,
+ }
+ }
+
++ if (tcp_rsk(req)->saw_mpc)
++ mptcp_synack_options(req, opts, &remaining);
++
+ return MAX_TCP_OPTION_SPACE - remaining;
+ }
+
+@@ -657,16 +666,22 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
+ opts->tsecr = tp->rx_opt.ts_recent;
+ size += TCPOLEN_TSTAMP_ALIGNED;
+ }
++ if (tp->mpc)
++ mptcp_established_options(sk, skb, opts, &size);
+
+ eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
+ if (unlikely(eff_sacks)) {
+- const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
+- opts->num_sack_blocks =
+- min_t(unsigned int, eff_sacks,
+- (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
+- TCPOLEN_SACK_PERBLOCK);
+- size += TCPOLEN_SACK_BASE_ALIGNED +
+- opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
++ const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
++ if (remaining < TCPOLEN_SACK_BASE_ALIGNED)
++ opts->num_sack_blocks = 0;
++ else
++ opts->num_sack_blocks =
++ min_t(unsigned int, eff_sacks,
++ (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
++ TCPOLEN_SACK_PERBLOCK);
++ if (opts->num_sack_blocks)
++ size += TCPOLEN_SACK_BASE_ALIGNED +
++ opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
+ }
+
+ return size;
+@@ -714,7 +729,7 @@ static void tcp_tasklet_func(unsigned long data)
+ unsigned long flags;
+ struct list_head *q, *n;
+ struct tcp_sock *tp;
+- struct sock *sk;
++ struct sock *sk, *meta_sk;
+
+ local_irq_save(flags);
+ list_splice_init(&tsq->head, &list);
+@@ -725,15 +740,27 @@ static void tcp_tasklet_func(unsigned long data)
+ list_del(&tp->tsq_node);
+
+ sk = (struct sock *)tp;
+- bh_lock_sock(sk);
++ meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
++ bh_lock_sock(meta_sk);
+
+- if (!sock_owned_by_user(sk)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ tcp_tsq_handler(sk);
++ if (tp->mpc)
++ tcp_tsq_handler(meta_sk);
+ } else {
+ /* defer the work to tcp_release_cb() */
+ set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
++
++ /* For MPTCP, we set the tsq-bit on the meta, and the
++ * subflow as we don't know if the limitation happened
++ * while inside mptcp_write_xmit or during tcp_write_xmit.
++ */
++ if (tp->mpc) {
++ set_bit(TCP_TSQ_DEFERRED, &tcp_sk(meta_sk)->tsq_flags);
++ mptcp_tsq_flags(sk);
++ }
+ }
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+
+ clear_bit(TSQ_QUEUED, &tp->tsq_flags);
+ sk_free(sk);
+@@ -743,7 +770,10 @@ static void tcp_tasklet_func(unsigned long data)
+ #define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \
+ (1UL << TCP_WRITE_TIMER_DEFERRED) | \
+ (1UL << TCP_DELACK_TIMER_DEFERRED) | \
+- (1UL << TCP_MTU_REDUCED_DEFERRED))
++ (1UL << TCP_MTU_REDUCED_DEFERRED) | \
++ (1UL << MPTCP_PATH_MANAGER) | \
++ (1UL << MPTCP_SUB_DEFERRED))
++
+ /**
+ * tcp_release_cb - tcp release_sock() callback
+ * @sk: socket
+@@ -790,6 +820,13 @@ void tcp_release_cb(struct sock *sk)
+ sk->sk_prot->mtu_reduced(sk);
+ __sock_put(sk);
+ }
++ if (flags & (1UL << MPTCP_PATH_MANAGER)) {
++ if (tcp_sk(sk)->mpcb->pm_ops->release_sock)
++ tcp_sk(sk)->mpcb->pm_ops->release_sock(sk);
++ __sock_put(sk);
++ }
++ if (flags & (1UL << MPTCP_SUB_DEFERRED))
++ mptcp_tsq_sub_deferred(sk);
+ }
+ EXPORT_SYMBOL(tcp_release_cb);
+
+@@ -849,8 +886,8 @@ void tcp_wfree(struct sk_buff *skb)
+ * We are working here with either a clone of the original
+ * SKB, or a fresh unique copy made by the retransmit engine.
+ */
+-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+- gfp_t gfp_mask)
++int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
++ gfp_t gfp_mask)
+ {
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_sock *inet;
+@@ -878,10 +915,28 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+
+- if (unlikely(skb_cloned(skb)))
+- skb = pskb_copy(skb, gfp_mask);
+- else
++ if (unlikely(skb_cloned(skb))) {
++ struct sk_buff *newskb;
++ if (mptcp_is_data_seq(skb))
++ skb_push(skb, MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN);
++
++ newskb = pskb_copy(skb, gfp_mask);
++
++ if (mptcp_is_data_seq(skb)) {
++ skb_pull(skb, MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN);
++ if (newskb)
++ skb_pull(newskb, MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN);
++ }
++ skb = newskb;
++ } else {
+ skb = skb_clone(skb, gfp_mask);
++ }
+ if (unlikely(!skb))
+ return -ENOBUFS;
+ }
+@@ -929,7 +984,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ */
+ th->window = htons(min(tp->rcv_wnd, 65535U));
+ } else {
+- th->window = htons(tcp_select_window(sk));
++ th->window = htons(tp->select_window(sk));
+ }
+ th->check = 0;
+ th->urg_ptr = 0;
+@@ -945,7 +1000,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ }
+ }
+
+- tcp_options_write((__be32 *)(th + 1), tp, &opts);
++ tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
+ if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
+ TCP_ECN_send(sk, skb, tcp_header_size);
+
+@@ -984,7 +1039,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
+ * otherwise socket can stall.
+ */
+-static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
++void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+@@ -997,15 +1052,16 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
+ }
+
+ /* Initialize TSO segments for a packet. */
+-static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
+- unsigned int mss_now)
++void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
++ unsigned int mss_now)
+ {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ /* Make sure we own this skb before messing gso_size/gso_segs */
+ WARN_ON_ONCE(skb_cloned(skb));
+
+- if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
++ if (skb->len <= mss_now || (is_meta_sk(sk) && !mptcp_sk_can_gso(sk)) ||
++ (!is_meta_sk(sk) && !sk_can_gso(sk)) || skb->ip_summed == CHECKSUM_NONE) {
+ /* Avoid the costly divide in the normal
+ * non-TSO case.
+ */
+@@ -1037,7 +1093,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
+ /* Pcount in the middle of the write queue got changed, we need to do various
+ * tweaks to fix counters
+ */
+-static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
++void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+@@ -1078,6 +1134,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ int nlen;
+ u8 flags;
+
++ if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb))
++ mptcp_fragment(sk, skb, len, mss_now, 0);
++
+ if (WARN_ON(len > skb->len))
+ return -EINVAL;
+
+@@ -1160,7 +1219,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ * eventually). The difference is that pulled data not copied, but
+ * immediately discarded.
+ */
+-static void __pskb_trim_head(struct sk_buff *skb, int len)
++void __pskb_trim_head(struct sk_buff *skb, int len)
+ {
+ struct skb_shared_info *shinfo;
+ int i, k, eat;
+@@ -1201,6 +1260,9 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
+ /* Remove acked data from a packet in the transmit queue. */
+ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
+ {
++ if (tcp_sk(sk)->mpc && !is_meta_sk(sk) && mptcp_is_data_seq(skb))
++ return mptcp_trim_head(sk, skb, len);
++
+ if (skb_unclone(skb, GFP_ATOMIC))
+ return -ENOMEM;
+
+@@ -1218,6 +1280,15 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
+ if (tcp_skb_pcount(skb) > 1)
+ tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+
++#ifdef CONFIG_MPTCP
++ /* Some data got acked - we assume that the seq-number reached the dest.
++ * Anyway, our MPTCP-option has been trimmed above - we lost it here.
++ * Only remove the SEQ if the call does not come from a meta retransmit.
++ */
++ if (tcp_sk(sk)->mpc && !is_meta_sk(sk))
++ TCP_SKB_CB(skb)->mptcp_flags &= ~MPTCPHDR_SEQ;
++#endif
++
+ return 0;
+ }
+
+@@ -1377,7 +1448,7 @@ unsigned int tcp_current_mss(struct sock *sk)
+ }
+
+ /* Congestion window validation. (RFC2861) */
+-static void tcp_cwnd_validate(struct sock *sk)
++void tcp_cwnd_validate(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+@@ -1411,8 +1482,8 @@ static bool tcp_minshall_check(const struct tcp_sock *tp)
+ * But we can avoid doing the divide again given we already have
+ * skb_pcount = skb->len / mss_now
+ */
+-static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
+- const struct sk_buff *skb)
++void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
++ const struct sk_buff *skb)
+ {
+ if (skb->len < tcp_skb_pcount(skb) * mss_now)
+ tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
+@@ -1433,19 +1504,28 @@ static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
+ (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
+ }
+ /* Returns the portion of skb which can be sent right away */
+-static unsigned int tcp_mss_split_point(const struct sock *sk,
+- const struct sk_buff *skb,
+- unsigned int mss_now,
+- unsigned int max_segs,
+- int nonagle)
++unsigned int tcp_mss_split_point(const struct sock *sk,
++ const struct sk_buff *skb,
++ unsigned int mss_now,
++ unsigned int max_segs,
++ int nonagle)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
++ const struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
+ u32 partial, needed, window, max_len;
+
+- window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
++ if (!tp->mpc)
++ window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
++ else
++ /* We need to evaluate the available space in the sending window
++ * at the subflow level. However, the subflow seq has not yet
++ * been set. Nevertheless we know that the caller will set it to
++ * write_seq.
++ */
++ window = tcp_wnd_end(tp) - tp->write_seq;
+ max_len = mss_now * max_segs;
+
+- if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
++ if (likely(max_len <= window && skb != tcp_write_queue_tail(meta_sk)))
+ return max_len;
+
+ needed = min(skb->len, window);
+@@ -1467,13 +1547,14 @@ static unsigned int tcp_mss_split_point(const struct sock *sk,
+ /* Can at least one segment of SKB be sent right now, according to the
+ * congestion window rules? If so, return how many segments are allowed.
+ */
+-static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
+- const struct sk_buff *skb)
++unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
++ const struct sk_buff *skb)
+ {
+ u32 in_flight, cwnd;
+
+ /* Don't be strict about the congestion window for the final FIN. */
+- if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
++ if (skb &&
++ ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb)) &&
+ tcp_skb_pcount(skb) == 1)
+ return 1;
+
+@@ -1489,8 +1570,8 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
+ * This must be invoked the first time we consider transmitting
+ * SKB onto the wire.
+ */
+-static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
+- unsigned int mss_now)
++int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
++ unsigned int mss_now)
+ {
+ int tso_segs = tcp_skb_pcount(skb);
+
+@@ -1505,8 +1586,8 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
+ /* Return true if the Nagle test allows this packet to be
+ * sent now.
+ */
+-static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+- unsigned int cur_mss, int nonagle)
++bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
++ unsigned int cur_mss, int nonagle)
+ {
+ /* Nagle rule does not apply to frames, which sit in the middle of the
+ * write_queue (they have no chances to get new data).
+@@ -1518,7 +1599,8 @@ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buf
+ return true;
+
+ /* Don't use the nagle rule for urgent data (or for the final FIN). */
+- if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
++ if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
++ mptcp_is_data_fin(skb))
+ return true;
+
+ if (!tcp_nagle_check(skb->len < cur_mss, tp, cur_mss, nonagle))
+@@ -1528,9 +1610,8 @@ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buf
+ }
+
+ /* Does at least the first segment of SKB fit into the send window? */
+-static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+- const struct sk_buff *skb,
+- unsigned int cur_mss)
++bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
++ unsigned int cur_mss)
+ {
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+
+@@ -1549,14 +1630,16 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+ unsigned int cwnd_quota;
++ const struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
++ const struct tcp_sock *meta_tp = tcp_sk(meta_sk);
+
+- tcp_init_tso_segs(sk, skb, cur_mss);
++ tcp_init_tso_segs(meta_sk, skb, cur_mss);
+
+- if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
++ if (!tcp_nagle_test(meta_tp, skb, cur_mss, nonagle))
+ return 0;
+
+ cwnd_quota = tcp_cwnd_test(tp, skb);
+- if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
++ if (cwnd_quota && !tcp_snd_wnd_test(meta_tp, skb, cur_mss))
+ cwnd_quota = 0;
+
+ return cwnd_quota;
+@@ -1566,12 +1649,16 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
+ bool tcp_may_send_now(struct sock *sk)
+ {
+ const struct tcp_sock *tp = tcp_sk(sk);
+- struct sk_buff *skb = tcp_send_head(sk);
++ struct sk_buff *skb;
++ const struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
++ const struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++
++ skb = tcp_send_head(meta_sk);
+
+ return skb &&
+ tcp_snd_test(sk, skb, tcp_current_mss(sk),
+- (tcp_skb_is_last(sk, skb) ?
+- tp->nonagle : TCP_NAGLE_PUSH));
++ (tcp_skb_is_last(meta_sk, skb) ?
++ meta_tp->nonagle : TCP_NAGLE_PUSH));
+ }
+
+ /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
+@@ -1588,6 +1675,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
+ int nlen = skb->len - len;
+ u8 flags;
+
++ if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb))
++ mptso_fragment(sk, skb, len, mss_now, gfp, 0);
++
+ /* All of a TSO frame must be composed of paged data. */
+ if (skb->len != skb->data_len)
+ return tcp_fragment(sk, skb, len, mss_now);
+@@ -1633,29 +1723,39 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
+ *
+ * This algorithm is from John Heffner.
+ */
+-static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
++bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 send_win, cong_win, limit, in_flight;
+ int win_divisor;
+
+- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
++ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb))
+ goto send_now;
+
+ if (icsk->icsk_ca_state != TCP_CA_Open)
+ goto send_now;
+
+ /* Defer for less than two clock ticks. */
+- if (tp->tso_deferred &&
+- (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
++ if (meta_tp->tso_deferred &&
++ (((u32)jiffies << 1) >> 1) - (meta_tp->tso_deferred >> 1) > 1)
+ goto send_now;
+
+ in_flight = tcp_packets_in_flight(tp);
+
+ BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
+
+- send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
++ if (!tp->mpc)
++ send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
++ else
++ /* We need to evaluate the available space in the sending window
++ * at the subflow level. However, the subflow seq has not yet
++ * been set. Nevertheless we know that the caller will set it to
++ * write_seq.
++ */
++ send_win = tcp_wnd_end(tp) - tp->write_seq;
+
+ /* From in_flight test above, we know that cwnd > in_flight. */
+ cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
+@@ -1668,7 +1768,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ goto send_now;
+
+ /* Middle in queue won't get any more data, full sendable already? */
+- if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
++ if ((skb != tcp_write_queue_tail(meta_sk)) && (limit >= skb->len))
+ goto send_now;
+
+ win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+@@ -1694,13 +1794,13 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+ /* Ok, it looks like it is advisable to defer.
+ * Do not rearm the timer if already set to not break TCP ACK clocking.
+ */
+- if (!tp->tso_deferred)
+- tp->tso_deferred = 1 | (jiffies << 1);
++ if (!meta_tp->tso_deferred)
++ meta_tp->tso_deferred = 1 | (jiffies << 1);
+
+ return true;
+
+ send_now:
+- tp->tso_deferred = 0;
++ meta_tp->tso_deferred = 0;
+ return false;
+ }
+
+@@ -1713,7 +1813,7 @@ send_now:
+ * 1 if a probe was sent,
+ * -1 otherwise
+ */
+-static int tcp_mtu_probe(struct sock *sk)
++int tcp_mtu_probe(struct sock *sk)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -1858,6 +1958,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int cwnd_quota;
+ int result;
+
++ if (is_meta_sk(sk))
++ return mptcp_write_xmit(sk, mss_now, nonagle, push_one, gfp);
++
+ sent_pkts = 0;
+
+ if (!push_one) {
+@@ -2313,6 +2416,10 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+ return;
+
++ /* Currently not supported for MPTCP - but it should be possible */
++ if (tp->mpc)
++ return;
++
+ tcp_for_write_queue_from_safe(skb, tmp, sk) {
+ if (!tcp_can_collapse(sk, skb))
+ break;
+@@ -2410,10 +2517,26 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ */
+ if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+ skb_headroom(skb) >= 0xFFFF)) {
+- struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+- GFP_ATOMIC);
++ struct sk_buff *nskb;
++
++ if (mptcp_is_data_seq(skb))
++ skb_push(skb, MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN);
++
++ nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
++
++ if (mptcp_is_data_seq(skb)) {
++ skb_pull(skb, MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN);
++ if (nskb)
++ skb_pull(nskb, MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN);
++ }
+ err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+- -ENOBUFS;
++ -ENOBUFS;
+ } else {
+ err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ }
+@@ -2640,6 +2763,11 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
+ {
+ struct sk_buff *skb;
+
++ if (is_meta_sk(sk)) {
++ mptcp_send_active_reset(sk, priority);
++ return;
++ }
++
+ /* NOTE: No TCP options attached and we never retransmit this. */
+ skb = alloc_skb(MAX_TCP_HEADER, priority);
+ if (!skb) {
+@@ -2742,14 +2870,14 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
+ req->window_clamp = tcp_full_space(sk);
+
+- /* tcp_full_space because it is guaranteed to be the first packet */
+- tcp_select_initial_window(tcp_full_space(sk),
+- mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
++ tp->select_initial_window(tcp_full_space(sk),
++ mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) -
++ (tcp_rsk(req)->saw_mpc ? MPTCP_SUB_LEN_DSM_ALIGN : 0),
+ &req->rcv_wnd,
+ &req->window_clamp,
+ ireq->wscale_ok,
+ &rcv_wscale,
+- dst_metric(dst, RTAX_INITRWND));
++ dst_metric(dst, RTAX_INITRWND), sk);
+ ireq->rcv_wscale = rcv_wscale;
+ }
+
+@@ -2785,7 +2913,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+
+ /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
+ th->window = htons(min(req->rcv_wnd, 65535U));
+- tcp_options_write((__be32 *)(th + 1), tp, &opts);
++ tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
+ th->doff = (tcp_header_size >> 2);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
+
+@@ -2839,13 +2967,13 @@ static void tcp_connect_init(struct sock *sk)
+ (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
+ tp->window_clamp = tcp_full_space(sk);
+
+- tcp_select_initial_window(tcp_full_space(sk),
++ tp->select_initial_window(tcp_full_space(sk),
+ tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
+ &tp->rcv_wnd,
+ &tp->window_clamp,
+ sysctl_tcp_window_scaling,
+ &rcv_wscale,
+- dst_metric(dst, RTAX_INITRWND));
++ dst_metric(dst, RTAX_INITRWND), sk);
+
+ tp->rx_opt.rcv_wscale = rcv_wscale;
+ tp->rcv_ssthresh = tp->rcv_wnd;
+@@ -2869,6 +2997,38 @@ static void tcp_connect_init(struct sock *sk)
+ inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+ inet_csk(sk)->icsk_retransmits = 0;
+ tcp_clear_retrans(tp);
++
++#ifdef CONFIG_MPTCP
++ if (sysctl_mptcp_enabled && mptcp_doit(sk)) {
++ if (is_master_tp(tp)) {
++ tp->request_mptcp = 1;
++ mptcp_connect_init(sk);
++ } else if (tp->mptcp) {
++ struct inet_sock *inet = inet_sk(sk);
++
++ tp->mptcp->snt_isn = tp->write_seq;
++ tp->mptcp->init_rcv_wnd = tp->rcv_wnd;
++
++ /* Set nonce for new subflows */
++ if (sk->sk_family == AF_INET)
++ tp->mptcp->mptcp_loc_nonce = mptcp_v4_get_nonce(
++ inet->inet_saddr,
++ inet->inet_daddr,
++ inet->inet_sport,
++ inet->inet_dport,
++ tp->write_seq);
++#if IS_ENABLED(CONFIG_IPV6)
++ else
++ tp->mptcp->mptcp_loc_nonce = mptcp_v6_get_nonce(
++ inet6_sk(sk)->saddr.s6_addr32,
++ sk->sk_v6_daddr.s6_addr32,
++ inet->inet_sport,
++ inet->inet_dport,
++ tp->write_seq);
++#endif
++ }
++ }
++#endif
+ }
+
+ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
+@@ -3111,6 +3271,7 @@ void tcp_send_ack(struct sock *sk)
+ TCP_SKB_CB(buff)->when = tcp_time_stamp;
+ tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
+ }
++EXPORT_SYMBOL(tcp_send_ack);
+
+ /* This routine sends a packet with an out of date sequence
+ * number. It assumes the other end will try to ack it.
+@@ -3123,7 +3284,7 @@ void tcp_send_ack(struct sock *sk)
+ * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
+ * out-of-date with SND.UNA-1 to probe window.
+ */
+-static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
++int tcp_xmit_probe_skb(struct sock *sk, int urgent)
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
+@@ -3161,6 +3322,9 @@ int tcp_write_wakeup(struct sock *sk)
+ if (sk->sk_state == TCP_CLOSE)
+ return -1;
+
++ if (is_meta_sk(sk))
++ return mptcp_write_wakeup(sk);
++
+ if ((skb = tcp_send_head(sk)) != NULL &&
+ before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+ int err;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 64f0354..7b55b9a 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -20,6 +20,7 @@
+
+ #include <linux/module.h>
+ #include <linux/gfp.h>
++#include <net/mptcp.h>
+ #include <net/tcp.h>
+
+ int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
+@@ -32,7 +33,7 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
+ int sysctl_tcp_orphan_retries __read_mostly;
+ int sysctl_tcp_thin_linear_timeouts __read_mostly;
+
+-static void tcp_write_err(struct sock *sk)
++void tcp_write_err(struct sock *sk)
+ {
+ sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+ sk->sk_error_report(sk);
+@@ -124,10 +125,8 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
+ */
+-static bool retransmits_timed_out(struct sock *sk,
+- unsigned int boundary,
+- unsigned int timeout,
+- bool syn_set)
++bool retransmits_timed_out(struct sock *sk, unsigned int boundary,
++ unsigned int timeout, bool syn_set)
+ {
+ unsigned int linear_backoff_thresh, start_ts;
+ unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+@@ -153,7 +152,7 @@ static bool retransmits_timed_out(struct sock *sk,
+ }
+
+ /* A write timeout has occurred. Process the after effects. */
+-static int tcp_write_timeout(struct sock *sk)
++int tcp_write_timeout(struct sock *sk)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -168,6 +167,10 @@ static int tcp_write_timeout(struct sock *sk)
+ }
+ retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+ syn_set = true;
++ /* Stop retransmitting MP_CAPABLE options in SYN if timed out. */
++ if (tcp_sk(sk)->request_mptcp &&
++ icsk->icsk_retransmits >= mptcp_sysctl_syn_retries())
++ tcp_sk(sk)->request_mptcp = 0;
+ } else {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+ /* Black hole detection */
+@@ -248,18 +251,22 @@ out:
+ static void tcp_delack_timer(unsigned long data)
+ {
+ struct sock *sk = (struct sock *)data;
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
+
+- bh_lock_sock(sk);
+- if (!sock_owned_by_user(sk)) {
++ bh_lock_sock(meta_sk);
++ if (!sock_owned_by_user(meta_sk)) {
+ tcp_delack_timer_handler(sk);
+ } else {
+ inet_csk(sk)->icsk_ack.blocked = 1;
+- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_DELAYEDACKLOCKED);
+ /* deleguate our work to tcp_release_cb() */
+ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+ sock_hold(sk);
++ if (tp->mpc)
++ mptcp_tsq_flags(sk);
+ }
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+ sock_put(sk);
+ }
+
+@@ -421,6 +428,9 @@ void tcp_retransmit_timer(struct sock *sk)
+
+ tcp_enter_loss(sk, 0);
+
++ if (tp->mpc)
++ mptcp_reinject_data(sk, 1);
++
+ if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
+ /* Retransmission failed because of local congestion,
+ * do not backoff.
+@@ -471,6 +481,8 @@ out_reset_timer:
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ }
++ if (tp->mpc)
++ mptcp_set_rto(sk);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
+ __sk_dst_reset(sk);
+@@ -502,7 +514,10 @@ void tcp_write_timer_handler(struct sock *sk)
+ break;
+ case ICSK_TIME_RETRANS:
+ icsk->icsk_pending = 0;
+- tcp_retransmit_timer(sk);
++ if (is_meta_sk(sk))
++ mptcp_retransmit_timer(sk);
++ else
++ tcp_retransmit_timer(sk);
+ break;
+ case ICSK_TIME_PROBE0:
+ icsk->icsk_pending = 0;
+@@ -517,16 +532,19 @@ out:
+ static void tcp_write_timer(unsigned long data)
+ {
+ struct sock *sk = (struct sock *)data;
++ struct sock *meta_sk = tcp_sk(sk)->mpc ? mptcp_meta_sk(sk) : sk;
+
+- bh_lock_sock(sk);
+- if (!sock_owned_by_user(sk)) {
++ bh_lock_sock(meta_sk);
++ if (!sock_owned_by_user(meta_sk)) {
+ tcp_write_timer_handler(sk);
+ } else {
+ /* deleguate our work to tcp_release_cb() */
+ if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+ sock_hold(sk);
++ if (tcp_sk(sk)->mpc)
++ mptcp_tsq_flags(sk);
+ }
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+ sock_put(sk);
+ }
+
+@@ -563,11 +581,12 @@ static void tcp_keepalive_timer (unsigned long data)
+ struct sock *sk = (struct sock *) data;
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
+ u32 elapsed;
+
+ /* Only process if socket is not in use. */
+- bh_lock_sock(sk);
+- if (sock_owned_by_user(sk)) {
++ bh_lock_sock(meta_sk);
++ if (sock_owned_by_user(meta_sk)) {
+ /* Try again later. */
+ inet_csk_reset_keepalive_timer (sk, HZ/20);
+ goto out;
+@@ -578,6 +597,29 @@ static void tcp_keepalive_timer (unsigned long data)
+ goto out;
+ }
+
++ if (tp->send_mp_fclose) {
++ /* MUST do this before tcp_write_timeout, because retrans_stamp
++ * may have been set to 0 in another part while we are
++ * retransmitting MP_FASTCLOSE. Then, we would crash, because
++ * retransmits_timed_out accesses the meta-write-queue.
++ *
++ * We make sure that the timestamp is != 0.
++ */
++ if (!tp->retrans_stamp)
++ tp->retrans_stamp = tcp_time_stamp ? : 1;
++
++ if (tcp_write_timeout(sk))
++ goto out;
++
++ tcp_send_ack(sk);
++ icsk->icsk_backoff++;
++ icsk->icsk_retransmits++;
++
++ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
++ elapsed = icsk->icsk_rto;
++ goto resched;
++ }
++
+ if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
+ if (tp->linger2 >= 0) {
+ const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
+@@ -639,7 +681,7 @@ death:
+ tcp_done(sk);
+
+ out:
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+ sock_put(sk);
+ }
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 6c7fa08..733d602 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -765,6 +765,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
+
+ kfree_rcu(ifp, rcu);
+ }
++EXPORT_SYMBOL(inet6_ifa_finish_destroy);
+
+ static void
+ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index d935889..9f0fd80 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -97,8 +97,7 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
+ return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+ }
+
+-static int inet6_create(struct net *net, struct socket *sock, int protocol,
+- int kern)
++int inet6_create(struct net *net, struct socket *sock, int protocol, int kern)
+ {
+ struct inet_sock *inet;
+ struct ipv6_pinfo *np;
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index c913818..2f5b4c5 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -96,8 +96,8 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
+ /*
+ * request_sock (formerly open request) hash tables.
+ */
+-static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
+- const u32 rnd, const u32 synq_hsize)
++u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
++ const u32 rnd, const u32 synq_hsize)
+ {
+ u32 c;
+
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index bb53a5e7..0d29995 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -181,7 +181,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+
+ /* check for timestamp cookie support */
+ memset(&tcp_opt, 0, sizeof(tcp_opt));
+- tcp_parse_options(skb, &tcp_opt, 0, NULL);
++ tcp_parse_options(skb, &tcp_opt, NULL, 0, NULL);
+
+ if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
+ goto out;
+@@ -253,10 +253,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ }
+
+ req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
+- tcp_select_initial_window(tcp_full_space(sk), req->mss,
++ tp->select_initial_window(tcp_full_space(sk), req->mss,
+ &req->rcv_wnd, &req->window_clamp,
+ ireq->wscale_ok, &rcv_wscale,
+- dst_metric(dst, RTAX_INITRWND));
++ dst_metric(dst, RTAX_INITRWND), sk);
+
+ ireq->rcv_wscale = rcv_wscale;
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 889079b..d7f8b5f 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -63,6 +63,8 @@
+ #include <net/inet_common.h>
+ #include <net/secure_seq.h>
+ #include <net/tcp_memcontrol.h>
++#include <net/mptcp.h>
++#include <net/mptcp_v6.h>
+ #include <net/busy_poll.h>
+
+ #include <asm/uaccess.h>
+@@ -73,14 +75,6 @@
+ #include <linux/crypto.h>
+ #include <linux/scatterlist.h>
+
+-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
+-static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+- struct request_sock *req);
+-
+-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+-
+-static const struct inet_connection_sock_af_ops ipv6_mapped;
+-static const struct inet_connection_sock_af_ops ipv6_specific;
+ #ifdef CONFIG_TCP_MD5SIG
+ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
+@@ -92,7 +86,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+ }
+ #endif
+
+-static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
++void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ {
+ struct dst_entry *dst = skb_dst(skb);
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+@@ -104,7 +98,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+ }
+
+-static void tcp_v6_hash(struct sock *sk)
++void tcp_v6_hash(struct sock *sk)
+ {
+ if (sk->sk_state != TCP_CLOSE) {
+ if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
+@@ -117,7 +111,7 @@ static void tcp_v6_hash(struct sock *sk)
+ }
+ }
+
+-static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
++__u32 tcp_v6_init_sequence(const struct sk_buff *skb)
+ {
+ return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+ ipv6_hdr(skb)->saddr.s6_addr32,
+@@ -125,7 +119,7 @@ static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
+ tcp_hdr(skb)->source);
+ }
+
+-static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
++int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ int addr_len)
+ {
+ struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
+@@ -339,7 +333,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+ const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
+ struct ipv6_pinfo *np;
+- struct sock *sk;
++ struct sock *sk, *meta_sk;
+ int err;
+ struct tcp_sock *tp;
+ __u32 seq;
+@@ -359,8 +353,14 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ return;
+ }
+
+- bh_lock_sock(sk);
+- if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
++ tp = tcp_sk(sk);
++ if (tp->mpc)
++ meta_sk = mptcp_meta_sk(sk);
++ else
++ meta_sk = sk;
++
++ bh_lock_sock(meta_sk);
++ if (sock_owned_by_user(meta_sk) && type != ICMPV6_PKT_TOOBIG)
+ NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+
+ if (sk->sk_state == TCP_CLOSE)
+@@ -371,7 +371,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ goto out;
+ }
+
+- tp = tcp_sk(sk);
+ seq = ntohl(th->seq);
+ if (sk->sk_state != TCP_LISTEN &&
+ !between(seq, tp->snd_una, tp->snd_nxt)) {
+@@ -401,11 +400,15 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ goto out;
+
+ tp->mtu_info = ntohl(info);
+- if (!sock_owned_by_user(sk))
++ if (!sock_owned_by_user(meta_sk))
+ tcp_v6_mtu_reduced(sk);
+- else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
++ else {
++ if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
+ &tp->tsq_flags))
+- sock_hold(sk);
++ sock_hold(sk);
++ if (tp->mpc)
++ mptcp_tsq_flags(sk);
++ }
+ goto out;
+ }
+
+@@ -415,7 +418,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ switch (sk->sk_state) {
+ struct request_sock *req, **prev;
+ case TCP_LISTEN:
+- if (sock_owned_by_user(sk))
++ if (sock_owned_by_user(meta_sk))
+ goto out;
+
+ req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
+@@ -440,7 +443,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV: /* Cannot happen.
+ It can, it SYNs are crossed. --ANK */
+- if (!sock_owned_by_user(sk)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ sk->sk_err = err;
+ sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
+
+@@ -450,22 +453,22 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ goto out;
+ }
+
+- if (!sock_owned_by_user(sk) && np->recverr) {
++ if (!sock_owned_by_user(meta_sk) && np->recverr) {
+ sk->sk_err = err;
+ sk->sk_error_report(sk);
+ } else
+ sk->sk_err_soft = err;
+
+ out:
+- bh_unlock_sock(sk);
++ bh_unlock_sock(meta_sk);
+ sock_put(sk);
+ }
+
+
+-static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+- struct flowi6 *fl6,
+- struct request_sock *req,
+- u16 queue_mapping)
++int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
++ struct flowi6 *fl6,
++ struct request_sock *req,
++ u16 queue_mapping)
+ {
+ struct inet_request_sock *ireq = inet_rsk(req);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+@@ -495,7 +498,7 @@ done:
+ return err;
+ }
+
+-static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
++int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
+ {
+ struct flowi6 fl6;
+ int res;
+@@ -506,7 +509,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
+ return res;
+ }
+
+-static void tcp_v6_reqsk_destructor(struct request_sock *req)
++void tcp_v6_reqsk_destructor(struct request_sock *req)
+ {
+ kfree_skb(inet_rsk(req)->pktopts);
+ }
+@@ -719,16 +722,16 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
+ };
+
+ #ifdef CONFIG_TCP_MD5SIG
+-static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
++const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+ .md5_lookup = tcp_v6_reqsk_md5_lookup,
+ .calc_md5_hash = tcp_v6_md5_hash_skb,
+ };
+ #endif
+
+-static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+- u32 tsval, u32 tsecr,
++static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack,
++ u32 data_ack, u32 win, u32 tsval, u32 tsecr,
+ struct tcp_md5sig_key *key, int rst, u8 tclass,
+- u32 label)
++ u32 label, int mptcp)
+ {
+ const struct tcphdr *th = tcp_hdr(skb);
+ struct tcphdr *t1;
+@@ -746,7 +749,10 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+ if (key)
+ tot_len += TCPOLEN_MD5SIG_ALIGNED;
+ #endif
+-
++#ifdef CONFIG_MPTCP
++ if (mptcp)
++ tot_len += MPTCP_SUB_LEN_DSS + MPTCP_SUB_LEN_ACK;
++#endif
+ buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
+ GFP_ATOMIC);
+ if (buff == NULL)
+@@ -784,6 +790,17 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+ tcp_v6_md5_hash_hdr((__u8 *)topt, key,
+ &ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, t1);
++ topt += 4;
++ }
++#endif
++#ifdef CONFIG_MPTCP
++ if (mptcp) {
++ /* Construction of 32-bit data_ack */
++ *topt++ = htonl((TCPOPT_MPTCP << 24) |
++ ((MPTCP_SUB_LEN_DSS + MPTCP_SUB_LEN_ACK) << 16) |
++ (0x20 << 8) |
++ (0x01));
++ *topt++ = htonl(data_ack);
+ }
+ #endif
+
+@@ -821,7 +838,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
+ kfree_skb(buff);
+ }
+
+-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
++void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+ {
+ const struct tcphdr *th = tcp_hdr(skb);
+ u32 seq = 0, ack_seq = 0;
+@@ -876,7 +893,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+ ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
+ (th->doff << 2);
+
+- tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0, 0);
++ tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, 0, key, 1, 0, 0, 0);
+
+ #ifdef CONFIG_TCP_MD5SIG
+ release_sk1:
+@@ -887,40 +904,47 @@ release_sk1:
+ #endif
+ }
+
+-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 data_ack,
+ u32 win, u32 tsval, u32 tsecr,
+- struct tcp_md5sig_key *key, u8 tclass,
+- u32 label)
++ struct tcp_md5sig_key *key, u8 tclass, u32 label,
++ int mptcp)
+ {
+- tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass,
+- label);
++ tcp_v6_send_response(skb, seq, ack, data_ack, win, tsval, tsecr, key, 0,
++ tclass, label, mptcp);
+ }
+
+ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ {
+ struct inet_timewait_sock *tw = inet_twsk(sk);
+ struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
++ u32 data_ack = 0;
++ int mptcp = 0;
+
++ if (tcptw->mptcp_tw && tcptw->mptcp_tw->meta_tw) {
++ data_ack = (u32)tcptw->mptcp_tw->rcv_nxt;
++ mptcp = 1;
++ }
+ tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++ data_ack,
+ tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ tcp_time_stamp + tcptw->tw_ts_offset,
+ tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
+- tw->tw_tclass, (tw->tw_flowlabel << 12));
++ tw->tw_tclass, (tw->tw_flowlabel << 12), mptcp);
+
+ inet_twsk_put(tw);
+ }
+
+-static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+- struct request_sock *req)
++void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req)
+ {
+ tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
+- req->rcv_wnd, tcp_time_stamp, req->ts_recent,
++ 0, req->rcv_wnd, tcp_time_stamp, req->ts_recent,
+ tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
+- 0, 0);
++ 0, 0, 0);
+ }
+
+
+-static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
++struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+ {
+ struct request_sock *req, **prev;
+ const struct tcphdr *th = tcp_hdr(skb);
+@@ -939,7 +963,13 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+
+ if (nsk) {
+ if (nsk->sk_state != TCP_TIME_WAIT) {
++ /* Don't lock the meta-sk again. It has already been locked
++ * before mptcp_v6_do_rcv.
++ */
++ if (tcp_sk(nsk)->mpc && !is_meta_sk(sk))
++ bh_lock_sock(mptcp_meta_sk(nsk));
+ bh_lock_sock(nsk);
++
+ return nsk;
+ }
+ inet_twsk_put(inet_twsk(nsk));
+@@ -959,6 +989,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+ struct tcp_options_received tmp_opt;
++ struct mptcp_options_received mopt;
+ struct request_sock *req;
+ struct inet_request_sock *ireq;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+@@ -971,6 +1002,23 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ if (skb->protocol == htons(ETH_P_IP))
+ return tcp_v4_conn_request(sk, skb);
+
++ tcp_clear_options(&tmp_opt);
++ tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
++ tmp_opt.user_mss = tp->rx_opt.user_mss;
++ mptcp_init_mp_opt(&mopt);
++ tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL);
++
++#ifdef CONFIG_MPTCP
++ /* MPTCP structures not initialized, so return error */
++ if (mptcp_init_failed)
++ mptcp_init_mp_opt(&mopt);
++
++ if (mopt.is_mp_join)
++ return mptcp_do_join_short(skb, &mopt, &tmp_opt, sock_net(sk));
++ if (mopt.drop_me)
++ goto drop;
++#endif
++
+ if (!ipv6_unicast_destination(skb))
+ goto drop;
+
+@@ -986,7 +1034,22 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ goto drop;
+ }
+
+- req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
++#ifdef CONFIG_MPTCP
++ if (sysctl_mptcp_enabled == MPTCP_APP && !tp->mptcp_enabled)
++ mopt.saw_mpc = 0;
++ if (mopt.saw_mpc && !want_cookie) {
++ req = inet6_reqsk_alloc(&mptcp6_request_sock_ops);
++
++ if (req == NULL)
++ goto drop;
++
++ mptcp_rsk(req)->mpcb = NULL;
++ mptcp_rsk(req)->dss_csum = mopt.dss_csum;
++ mptcp_rsk(req)->collide_tk.pprev = NULL;
++ } else
++#endif
++ req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
++
+ if (req == NULL)
+ goto drop;
+
+@@ -994,17 +1057,15 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
+ #endif
+
+- tcp_clear_options(&tmp_opt);
+- tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+- tmp_opt.user_mss = tp->rx_opt.user_mss;
+- tcp_parse_options(skb, &tmp_opt, 0, NULL);
+-
+ if (want_cookie && !tmp_opt.saw_tstamp)
+ tcp_clear_options(&tmp_opt);
+
+ tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+ tcp_openreq_init(req, &tmp_opt, skb);
+
++ if (mopt.saw_mpc && !want_cookie)
++ mptcp_reqsk_new_mptcp(req, &tmp_opt, &mopt, skb);
++
+ ireq = inet_rsk(req);
+ ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+@@ -1094,9 +1155,9 @@ drop:
+ return 0; /* don't send reset */
+ }
+
+-static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+- struct request_sock *req,
+- struct dst_entry *dst)
++struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req,
++ struct dst_entry *dst)
+ {
+ struct inet_request_sock *ireq;
+ struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+@@ -1317,7 +1378,7 @@ static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
+ * This is because we cannot sleep with the original spinlock
+ * held.
+ */
+-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
++int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ {
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct tcp_sock *tp;
+@@ -1339,6 +1400,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+ goto discard;
+ #endif
+
++ if (is_meta_sk(sk))
++ return mptcp_v6_do_rcv(sk, skb);
++
+ if (sk_filter(sk, skb))
+ goto discard;
+
+@@ -1460,7 +1524,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ {
+ const struct tcphdr *th;
+ const struct ipv6hdr *hdr;
+- struct sock *sk;
++ struct sock *sk, *meta_sk = NULL;
+ int ret;
+ struct net *net = dev_net(skb->dev);
+
+@@ -1491,18 +1555,43 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+ skb->len - th->doff*4);
+ TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
++#ifdef CONFIG_MPTCP
++ TCP_SKB_CB(skb)->mptcp_flags = 0;
++ TCP_SKB_CB(skb)->dss_off = 0;
++#endif
+ TCP_SKB_CB(skb)->when = 0;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
+ TCP_SKB_CB(skb)->sacked = 0;
+
+ sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
+- if (!sk)
+- goto no_tcp_socket;
+
+ process:
+- if (sk->sk_state == TCP_TIME_WAIT)
++ if (sk && sk->sk_state == TCP_TIME_WAIT)
+ goto do_time_wait;
+
++#ifdef CONFIG_MPTCP
++ if (!sk && th->syn && !th->ack) {
++ int ret = mptcp_lookup_join(skb, NULL);
++
++ if (ret < 0) {
++ tcp_v6_send_reset(NULL, skb);
++ goto discard_it;
++ } else if (ret > 0) {
++ return 0;
++ }
++ }
++
++ /* Is there a pending request sock for this segment ? */
++ if ((!sk || sk->sk_state == TCP_LISTEN) && mptcp_check_req(skb, net)) {
++ if (sk)
++ sock_put(sk);
++ return 0;
++ }
++#endif
++
++ if (!sk)
++ goto no_tcp_socket;
++
+ if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+ goto discard_and_relse;
+@@ -1517,11 +1606,21 @@ process:
+ sk_mark_napi_id(sk, skb);
+ skb->dev = NULL;
+
+- bh_lock_sock_nested(sk);
++ if (tcp_sk(sk)->mpc) {
++ meta_sk = mptcp_meta_sk(sk);
++
++ bh_lock_sock_nested(meta_sk);
++ if (sock_owned_by_user(meta_sk))
++ skb->sk = sk;
++ } else {
++ meta_sk = sk;
++ bh_lock_sock_nested(sk);
++ }
++
+ ret = 0;
+- if (!sock_owned_by_user(sk)) {
++ if (!sock_owned_by_user(meta_sk)) {
+ #ifdef CONFIG_NET_DMA
+- struct tcp_sock *tp = tcp_sk(sk);
++ struct tcp_sock *tp = tcp_sk(meta_sk);
+ if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
+ tp->ucopy.dma_chan = net_dma_find_channel();
+ if (tp->ucopy.dma_chan)
+@@ -1529,16 +1628,17 @@ process:
+ else
+ #endif
+ {
+- if (!tcp_prequeue(sk, skb))
++ if (!tcp_prequeue(meta_sk, skb))
+ ret = tcp_v6_do_rcv(sk, skb);
+ }
+- } else if (unlikely(sk_add_backlog(sk, skb,
+- sk->sk_rcvbuf + sk->sk_sndbuf))) {
+- bh_unlock_sock(sk);
++ } else if (unlikely(sk_add_backlog(meta_sk, skb,
++ meta_sk->sk_rcvbuf + meta_sk->sk_sndbuf))) {
++ bh_unlock_sock(meta_sk);
+ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+ goto discard_and_relse;
+ }
+- bh_unlock_sock(sk);
++
++ bh_unlock_sock(meta_sk);
+
+ sock_put(sk);
+ return ret ? -1 : 0;
+@@ -1595,6 +1695,18 @@ do_time_wait:
+ sk = sk2;
+ goto process;
+ }
++#ifdef CONFIG_MPTCP
++ if (th->syn && !th->ack) {
++ int ret = mptcp_lookup_join(skb, inet_twsk(sk));
++
++ if (ret < 0) {
++ tcp_v6_send_reset(NULL, skb);
++ goto discard_it;
++ } else if (ret > 0) {
++ return 0;
++ }
++ }
++#endif
+ /* Fall through to ACK */
+ }
+ case TCP_TW_ACK:
+@@ -1644,13 +1756,13 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
+ }
+ }
+
+-static struct timewait_sock_ops tcp6_timewait_sock_ops = {
++struct timewait_sock_ops tcp6_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
+ .twsk_unique = tcp_twsk_unique,
+ .twsk_destructor= tcp_twsk_destructor,
+ };
+
+-static const struct inet_connection_sock_af_ops ipv6_specific = {
++const struct inet_connection_sock_af_ops ipv6_specific = {
+ .queue_xmit = inet6_csk_xmit,
+ .send_check = tcp_v6_send_check,
+ .rebuild_header = inet6_sk_rebuild_header,
+@@ -1682,7 +1794,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+ * TCP over IPv4 via INET6 API
+ */
+
+-static const struct inet_connection_sock_af_ops ipv6_mapped = {
++const struct inet_connection_sock_af_ops ipv6_mapped = {
+ .queue_xmit = ip_queue_xmit,
+ .send_check = tcp_v4_send_check,
+ .rebuild_header = inet_sk_rebuild_header,
+@@ -1727,7 +1839,7 @@ static int tcp_v6_init_sock(struct sock *sk)
+ return 0;
+ }
+
+-static void tcp_v6_destroy_sock(struct sock *sk)
++void tcp_v6_destroy_sock(struct sock *sk)
+ {
+ tcp_v4_destroy_sock(sk);
+ inet6_destroy_sock(sk);
+diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
+new file mode 100644
+index 0000000..88a05b1
+--- /dev/null
++++ b/net/mptcp/Kconfig
+@@ -0,0 +1,58 @@
++#
++# MPTCP configuration
++#
++config MPTCP
++ bool "MPTCP protocol"
++ depends on (IPV6=y || IPV6=n)
++ ---help---
++ This replaces the normal TCP stack with a Multipath TCP stack,
++ able to use several paths at once.
++
++menuconfig MPTCP_PM_ADVANCED
++ bool "MPTCP: advanced path-manager control"
++ depends on MPTCP=y
++ ---help---
++ Support for selection of different path-managers. You should choose 'Y' here,
++ because otherwise you will not actively create new MPTCP-subflows.
++
++if MPTCP_PM_ADVANCED
++
++config MPTCP_FULLMESH
++ tristate "MPTCP Full-Mesh Path-Manager"
++ depends on MPTCP=y
++ ---help---
++ This path-management module will create a full-mesh among all IP-addresses.
++
++config MPTCP_NDIFFPORTS
++ tristate "MPTCP ndiff-ports"
++ depends on MPTCP=y
++ ---help---
++ This path-management module will create multiple subflows between the same
++ pair of IP-addresses, modifying the source-port. You can set the number
++ of subflows via the mptcp_ndiffports-sysctl.
++
++choice
++ prompt "Default MPTCP Path-Manager"
++ default DEFAULT
++ help
++ Select the Path-Manager of your choice
++
++ config DEFAULT_FULLMESH
++ bool "Full mesh" if MPTCP_FULLMESH=y
++
++ config DEFAULT_NDIFFPORTS
++ bool "ndiff-ports" if MPTCP_NDIFFPORTS=y
++
++ config DEFAULT_DUMMY
++ bool "Default"
++
++endchoice
++
++endif
++
++config DEFAULT_MPTCP_PM
++ string
++ default "default" if DEFAULT_DUMMY
++ default "fullmesh" if DEFAULT_FULLMESH
++ default "ndiffports" if DEFAULT_NDIFFPORTS
++ default "default"
+diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile
+new file mode 100644
+index 0000000..e7238f5
+--- /dev/null
++++ b/net/mptcp/Makefile
+@@ -0,0 +1,18 @@
++#
++## Makefile for MultiPath TCP support code.
++#
++#
++
++obj-$(CONFIG_MPTCP) += mptcp.o
++
++mptcp-y := mptcp_ctrl.o mptcp_ipv4.o mptcp_ofo_queue.o mptcp_pm.o \
++ mptcp_output.o mptcp_input.o
++
++obj-$(CONFIG_TCP_CONG_COUPLED) += mptcp_coupled.o
++obj-$(CONFIG_TCP_CONG_OLIA) += mptcp_olia.o
++obj-$(CONFIG_TCP_CONG_WVEGAS) += mptcp_wvegas.o
++obj-$(CONFIG_MPTCP_FULLMESH) += mptcp_fullmesh.o
++obj-$(CONFIG_MPTCP_NDIFFPORTS) += mptcp_ndiffports.o
++
++mptcp-$(subst m,y,$(CONFIG_IPV6)) += mptcp_ipv6.o
++
+diff --git a/net/mptcp/mptcp_coupled.c b/net/mptcp/mptcp_coupled.c
+new file mode 100644
+index 0000000..d71f96e
+--- /dev/null
++++ b/net/mptcp/mptcp_coupled.c
+@@ -0,0 +1,273 @@
++/*
++ * MPTCP implementation - Coupled Congestion Control
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++#include <net/tcp.h>
++#include <net/mptcp.h>
++
++#include <linux/module.h>
++
++/* Scaling is done in the numerator with alpha_scale_num and in the denominator
++ * with alpha_scale_den.
++ *
++ * To downscale, we just need to use alpha_scale.
++ *
++ * We have: alpha_scale = alpha_scale_num / (alpha_scale_den ^ 2)
++ */
++static int alpha_scale_den = 10;
++static int alpha_scale_num = 32;
++static int alpha_scale = 12;
++
++struct mptcp_ccc {
++ u64 alpha;
++ bool forced_update;
++};
++
++static inline int mptcp_ccc_sk_can_send(const struct sock *sk)
++{
++ return mptcp_sk_can_send(sk) && tcp_sk(sk)->srtt;
++}
++
++static inline u64 mptcp_get_alpha(struct sock *meta_sk)
++{
++ struct mptcp_ccc *mptcp_ccc = inet_csk_ca(meta_sk);
++ return mptcp_ccc->alpha;
++}
++
++static inline void mptcp_set_alpha(struct sock *meta_sk, u64 alpha)
++{
++ struct mptcp_ccc *mptcp_ccc = inet_csk_ca(meta_sk);
++ mptcp_ccc->alpha = alpha;
++}
++
++static inline u64 mptcp_ccc_scale(u32 val, int scale)
++{
++ return (u64) val << scale;
++}
++
++static inline bool mptcp_get_forced(struct sock *meta_sk)
++{
++ struct mptcp_ccc *mptcp_ccc = inet_csk_ca(meta_sk);
++ return mptcp_ccc->forced_update;
++}
++
++static inline void mptcp_set_forced(struct sock *meta_sk, bool force)
++{
++ struct mptcp_ccc *mptcp_ccc = inet_csk_ca(meta_sk);
++ mptcp_ccc->forced_update = force;
++}
++
++static void mptcp_ccc_recalc_alpha(struct sock *sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(sk)->mpcb;
++ struct sock *sub_sk;
++ int best_cwnd = 0, best_rtt = 0, can_send = 0;
++ u64 max_numerator = 0, sum_denominator = 0, alpha = 1;
++
++ if (!mpcb)
++ return;
++
++ /* Only one subflow left - fall back to normal reno-behavior
++ * (set alpha to 1) */
++ if (mpcb->cnt_established <= 1)
++ goto exit;
++
++ /* Do regular alpha-calculation for multiple subflows */
++
++ /* Find the max numerator of the alpha-calculation */
++ mptcp_for_each_sk(mpcb, sub_sk) {
++ struct tcp_sock *sub_tp = tcp_sk(sub_sk);
++ u64 tmp;
++
++ if (!mptcp_ccc_sk_can_send(sub_sk))
++ continue;
++
++ can_send++;
++
++ /* We need to look for the path that provides the max value.
++ * Integer overflow is not possible here, because
++ * tmp is a u64.
++ */
++ tmp = div64_u64(mptcp_ccc_scale(sub_tp->snd_cwnd,
++ alpha_scale_num), (u64)sub_tp->srtt * sub_tp->srtt);
++
++ if (tmp >= max_numerator) {
++ max_numerator = tmp;
++ best_cwnd = sub_tp->snd_cwnd;
++ best_rtt = sub_tp->srtt;
++ }
++ }
++
++ /* No subflow is able to send - we don't care anymore */
++ if (unlikely(!can_send))
++ goto exit;
++
++ /* Calculate the denominator */
++ mptcp_for_each_sk(mpcb, sub_sk) {
++ struct tcp_sock *sub_tp = tcp_sk(sub_sk);
++
++ if (!mptcp_ccc_sk_can_send(sub_sk))
++ continue;
++
++ sum_denominator += div_u64(
++ mptcp_ccc_scale(sub_tp->snd_cwnd,
++ alpha_scale_den) * best_rtt,
++ sub_tp->srtt);
++ }
++ sum_denominator *= sum_denominator;
++ if (unlikely(!sum_denominator)) {
++ pr_err("%s: sum_denominator == 0, cnt_established:%d\n",
++ __func__, mpcb->cnt_established);
++ mptcp_for_each_sk(mpcb, sub_sk) {
++ struct tcp_sock *sub_tp = tcp_sk(sub_sk);
++ pr_err("%s: pi:%d, state:%d\n, rtt:%u, cwnd: %u",
++ __func__, sub_tp->mptcp->path_index,
++ sub_sk->sk_state, sub_tp->srtt,
++ sub_tp->snd_cwnd);
++ }
++ }
++
++ alpha = div64_u64(mptcp_ccc_scale(best_cwnd, alpha_scale_num), sum_denominator);
++
++ if (unlikely(!alpha))
++ alpha = 1;
++
++exit:
++ mptcp_set_alpha(mptcp_meta_sk(sk), alpha);
++}
++
++static void mptcp_ccc_init(struct sock *sk)
++{
++ if (tcp_sk(sk)->mpc) {
++ mptcp_set_forced(mptcp_meta_sk(sk), 0);
++ mptcp_set_alpha(mptcp_meta_sk(sk), 1);
++ }
++ /* If we do not mptcp, behave like reno: return */
++}
++
++static void mptcp_ccc_cwnd_event(struct sock *sk, enum tcp_ca_event event)
++{
++ if (event == CA_EVENT_LOSS)
++ mptcp_ccc_recalc_alpha(sk);
++}
++
++static void mptcp_ccc_set_state(struct sock *sk, u8 ca_state)
++{
++ if (!tcp_sk(sk)->mpc)
++ return;
++
++ mptcp_set_forced(mptcp_meta_sk(sk), 1);
++}
++
++static void mptcp_ccc_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++ int snd_cwnd;
++
++ if (!tp->mpc) {
++ tcp_reno_cong_avoid(sk, ack, acked, in_flight);
++ return;
++ }
++
++ if (!tcp_is_cwnd_limited(sk, in_flight))
++ return;
++
++ if (tp->snd_cwnd <= tp->snd_ssthresh) {
++ /* In "safe" area, increase. */
++ tcp_slow_start(tp, acked);
++ mptcp_ccc_recalc_alpha(sk);
++ return;
++ }
++
++ if (mptcp_get_forced(mptcp_meta_sk(sk))) {
++ mptcp_ccc_recalc_alpha(sk);
++ mptcp_set_forced(mptcp_meta_sk(sk), 0);
++ }
++
++ if (mpcb->cnt_established > 1) {
++ u64 alpha = mptcp_get_alpha(mptcp_meta_sk(sk));
++
++ /* This may happen if, at initialization time, the mpcb
++ * was not yet attached to the sock and initializing
++ * alpha therefore failed.
++ */
++ if (unlikely(!alpha))
++ alpha = 1;
++
++ snd_cwnd = (int) div_u64 ((u64) mptcp_ccc_scale(1, alpha_scale),
++ alpha);
++
++ /* snd_cwnd_cnt >= max (scale * tot_cwnd / alpha, cwnd)
++ * Thus, we select here the max value. */
++ if (snd_cwnd < tp->snd_cwnd)
++ snd_cwnd = tp->snd_cwnd;
++ } else {
++ snd_cwnd = tp->snd_cwnd;
++ }
++
++ if (tp->snd_cwnd_cnt >= snd_cwnd) {
++ if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
++ tp->snd_cwnd++;
++ mptcp_ccc_recalc_alpha(sk);
++ }
++
++ tp->snd_cwnd_cnt = 0;
++ } else {
++ tp->snd_cwnd_cnt++;
++ }
++}
++
++static struct tcp_congestion_ops mptcp_ccc = {
++ .init = mptcp_ccc_init,
++ .ssthresh = tcp_reno_ssthresh,
++ .cong_avoid = mptcp_ccc_cong_avoid,
++ .cwnd_event = mptcp_ccc_cwnd_event,
++ .set_state = mptcp_ccc_set_state,
++ .min_cwnd = tcp_reno_min_cwnd,
++ .owner = THIS_MODULE,
++ .name = "coupled",
++};
++
++static int __init mptcp_ccc_register(void)
++{
++ BUILD_BUG_ON(sizeof(struct mptcp_ccc) > ICSK_CA_PRIV_SIZE);
++ return tcp_register_congestion_control(&mptcp_ccc);
++}
++
++static void __exit mptcp_ccc_unregister(void)
++{
++ tcp_unregister_congestion_control(&mptcp_ccc);
++}
++
++module_init(mptcp_ccc_register);
++module_exit(mptcp_ccc_unregister);
++
++MODULE_AUTHOR("Christoph Paasch, Sébastien Barré");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MPTCP COUPLED CONGESTION CONTROL");
++MODULE_VERSION("0.1");
+diff --git a/net/mptcp/mptcp_ctrl.c b/net/mptcp/mptcp_ctrl.c
+new file mode 100644
+index 0000000..6a7654d
+--- /dev/null
++++ b/net/mptcp/mptcp_ctrl.c
+@@ -0,0 +1,2270 @@
++/*
++ * MPTCP implementation - MPTCP-control
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <net/inet_common.h>
++#include <net/inet6_hashtables.h>
++#include <net/ipv6.h>
++#include <net/ip6_checksum.h>
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++#if IS_ENABLED(CONFIG_IPV6)
++#include <net/mptcp_v6.h>
++#endif
++#include <net/sock.h>
++#include <net/tcp.h>
++#include <net/tcp_states.h>
++#include <net/transp_v6.h>
++#include <net/xfrm.h>
++
++#include <linux/cryptohash.h>
++#include <linux/kconfig.h>
++#include <linux/module.h>
++#include <linux/netpoll.h>
++#include <linux/list.h>
++#include <linux/jhash.h>
++#include <linux/tcp.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/random.h>
++#include <linux/inetdevice.h>
++#include <linux/workqueue.h>
++#include <linux/atomic.h>
++#include <linux/sysctl.h>
++
++static struct kmem_cache *mptcp_sock_cache __read_mostly;
++static struct kmem_cache *mptcp_cb_cache __read_mostly;
++static struct kmem_cache *mptcp_tw_cache __read_mostly;
++
++int sysctl_mptcp_enabled __read_mostly = 1;
++int sysctl_mptcp_checksum __read_mostly = 1;
++int sysctl_mptcp_debug __read_mostly;
++EXPORT_SYMBOL(sysctl_mptcp_debug);
++int sysctl_mptcp_syn_retries __read_mostly = 3;
++
++bool mptcp_init_failed __read_mostly;
++
++static int proc_mptcp_path_manager(ctl_table *ctl, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ char val[MPTCP_PM_NAME_MAX];
++ ctl_table tbl = {
++ .data = val,
++ .maxlen = MPTCP_PM_NAME_MAX,
++ };
++ int ret;
++
++ mptcp_get_default_path_manager(val);
++
++ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
++ if (write && ret == 0)
++ ret = mptcp_set_default_path_manager(val);
++ return ret;
++}
++
++static struct ctl_table mptcp_table[] = {
++ {
++ .procname = "mptcp_enabled",
++ .data = &sysctl_mptcp_enabled,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .procname = "mptcp_checksum",
++ .data = &sysctl_mptcp_checksum,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .procname = "mptcp_debug",
++ .data = &sysctl_mptcp_debug,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .procname = "mptcp_syn_retries",
++ .data = &sysctl_mptcp_syn_retries,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .procname = "mptcp_path_manager",
++ .mode = 0644,
++ .maxlen = MPTCP_PM_NAME_MAX,
++ .proc_handler = proc_mptcp_path_manager,
++ },
++ { }
++};
++
++static inline u32 mptcp_hash_tk(u32 token)
++{
++ return token % MPTCP_HASH_SIZE;
++}
++
++struct hlist_nulls_head tk_hashtable[MPTCP_HASH_SIZE];
++EXPORT_SYMBOL(tk_hashtable);
++
++/* This second hashtable is needed to retrieve request socks
++ * created as a result of a join request. While the SYN contains
++ * the token, the final ack does not, so we need a separate hashtable
++ * to retrieve the mpcb.
++ */
++struct list_head mptcp_reqsk_htb[MPTCP_HASH_SIZE];
++spinlock_t mptcp_reqsk_hlock; /* hashtable protection */
++
++/* The following hash table is used to avoid collision of token */
++static struct hlist_nulls_head mptcp_reqsk_tk_htb[MPTCP_HASH_SIZE];
++spinlock_t mptcp_tk_hashlock; /* hashtable protection */
++
++static int mptcp_reqsk_find_tk(u32 token)
++{
++ u32 hash = mptcp_hash_tk(token);
++ struct mptcp_request_sock *mtreqsk;
++ const struct hlist_nulls_node *node;
++
++ hlist_nulls_for_each_entry_rcu(mtreqsk, node,
++ &mptcp_reqsk_tk_htb[hash], collide_tk) {
++ if (token == mtreqsk->mptcp_loc_token)
++ return 1;
++ }
++ return 0;
++}
++
++static void mptcp_reqsk_insert_tk(struct request_sock *reqsk, u32 token)
++{
++ u32 hash = mptcp_hash_tk(token);
++
++ hlist_nulls_add_head_rcu(&mptcp_rsk(reqsk)->collide_tk,
++ &mptcp_reqsk_tk_htb[hash]);
++}
++
++static void mptcp_reqsk_remove_tk(struct request_sock *reqsk)
++{
++ rcu_read_lock();
++ spin_lock(&mptcp_tk_hashlock);
++ hlist_nulls_del_init_rcu(&mptcp_rsk(reqsk)->collide_tk);
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock();
++}
++
++void mptcp_reqsk_destructor(struct request_sock *req)
++{
++ if (!mptcp_rsk(req)->mpcb) {
++ if (in_softirq()) {
++ mptcp_reqsk_remove_tk(req);
++ } else {
++ rcu_read_lock_bh();
++ spin_lock(&mptcp_tk_hashlock);
++ hlist_nulls_del_init_rcu(&mptcp_rsk(req)->collide_tk);
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock_bh();
++ }
++ } else {
++ mptcp_hash_request_remove(req);
++ }
++}
++
++static void __mptcp_hash_insert(struct tcp_sock *meta_tp, u32 token)
++{
++ u32 hash = mptcp_hash_tk(token);
++ hlist_nulls_add_head_rcu(&meta_tp->tk_table, &tk_hashtable[hash]);
++ meta_tp->inside_tk_table = 1;
++}
++
++static int mptcp_find_token(u32 token)
++{
++ u32 hash = mptcp_hash_tk(token);
++ struct tcp_sock *meta_tp;
++ const struct hlist_nulls_node *node;
++
++ hlist_nulls_for_each_entry_rcu(meta_tp, node, &tk_hashtable[hash], tk_table) {
++ if (token == meta_tp->mptcp_loc_token)
++ return 1;
++ }
++ return 0;
++}
++
++static void mptcp_set_key_reqsk(struct request_sock *req,
++ const struct sk_buff *skb)
++{
++ struct inet_request_sock *ireq = inet_rsk(req);
++ struct mptcp_request_sock *mtreq = mptcp_rsk(req);
++
++ if (skb->protocol == htons(ETH_P_IP)) {
++ mtreq->mptcp_loc_key = mptcp_v4_get_key(ip_hdr(skb)->saddr,
++ ip_hdr(skb)->daddr,
++ htons(ireq->ir_num),
++ ireq->ir_rmt_port);
++#if IS_ENABLED(CONFIG_IPV6)
++ } else {
++ mtreq->mptcp_loc_key = mptcp_v6_get_key(ipv6_hdr(skb)->saddr.s6_addr32,
++ ipv6_hdr(skb)->daddr.s6_addr32,
++ htons(ireq->ir_num),
++ ireq->ir_rmt_port);
++#endif
++ }
++
++ mptcp_key_sha1(mtreq->mptcp_loc_key, &mtreq->mptcp_loc_token, NULL);
++}
++
++/* New MPTCP-connection request, prepare a new token for the meta-socket that
++ * will be created in mptcp_check_req_master(), and store the received token.
++ */
++void mptcp_reqsk_new_mptcp(struct request_sock *req,
++ const struct tcp_options_received *rx_opt,
++ const struct mptcp_options_received *mopt,
++ const struct sk_buff *skb)
++{
++ struct mptcp_request_sock *mtreq = mptcp_rsk(req);
++
++ tcp_rsk(req)->saw_mpc = 1;
++
++ rcu_read_lock();
++ spin_lock(&mptcp_tk_hashlock);
++ do {
++ mptcp_set_key_reqsk(req, skb);
++ } while (mptcp_reqsk_find_tk(mtreq->mptcp_loc_token) ||
++ mptcp_find_token(mtreq->mptcp_loc_token));
++
++ mptcp_reqsk_insert_tk(req, mtreq->mptcp_loc_token);
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock();
++ mtreq->mptcp_rem_key = mopt->mptcp_key;
++}
++
++static void mptcp_set_key_sk(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct inet_sock *isk = inet_sk(sk);
++
++ if (sk->sk_family == AF_INET)
++ tp->mptcp_loc_key = mptcp_v4_get_key(isk->inet_saddr,
++ isk->inet_daddr,
++ isk->inet_sport,
++ isk->inet_dport);
++#if IS_ENABLED(CONFIG_IPV6)
++ else
++ tp->mptcp_loc_key = mptcp_v6_get_key(inet6_sk(sk)->saddr.s6_addr32,
++ sk->sk_v6_daddr.s6_addr32,
++ isk->inet_sport,
++ isk->inet_dport);
++#endif
++
++ mptcp_key_sha1(tp->mptcp_loc_key,
++ &tp->mptcp_loc_token, NULL);
++}
++
++void mptcp_connect_init(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ rcu_read_lock_bh();
++ spin_lock(&mptcp_tk_hashlock);
++ do {
++ mptcp_set_key_sk(sk);
++ } while (mptcp_reqsk_find_tk(tp->mptcp_loc_token) ||
++ mptcp_find_token(tp->mptcp_loc_token));
++
++ __mptcp_hash_insert(tp, tp->mptcp_loc_token);
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock_bh();
++}
++
++/**
++ * This function increments the refcount of the mpcb struct.
++ * It is the responsibility of the caller to decrement when releasing
++ * the structure.
++ */
++struct sock *mptcp_hash_find(struct net *net, u32 token)
++{
++ u32 hash = mptcp_hash_tk(token);
++ struct tcp_sock *meta_tp;
++ struct sock *meta_sk = NULL;
++ struct hlist_nulls_node *node;
++
++ rcu_read_lock();
++ hlist_nulls_for_each_entry_rcu(meta_tp, node, &tk_hashtable[hash],
++ tk_table) {
++ meta_sk = (struct sock *)meta_tp;
++ if (token == meta_tp->mptcp_loc_token &&
++ net_eq(net, sock_net(meta_sk)) &&
++ atomic_inc_not_zero(&meta_sk->sk_refcnt))
++ break;
++ meta_sk = NULL;
++ }
++ rcu_read_unlock();
++ return meta_sk;
++}
++
++void mptcp_hash_remove_bh(struct tcp_sock *meta_tp)
++{
++ /* remove from the token hashtable */
++ rcu_read_lock_bh();
++ spin_lock(&mptcp_tk_hashlock);
++ hlist_nulls_del_init_rcu(&meta_tp->tk_table);
++ meta_tp->inside_tk_table = 0;
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock_bh();
++}
++
++void mptcp_hash_remove(struct tcp_sock *meta_tp)
++{
++ rcu_read_lock();
++ spin_lock(&mptcp_tk_hashlock);
++ hlist_nulls_del_init_rcu(&meta_tp->tk_table);
++ meta_tp->inside_tk_table = 0;
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock();
++}
++
++static struct sock *mptcp_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
++ struct request_sock *req,
++ struct dst_entry *dst)
++{
++#if IS_ENABLED(CONFIG_IPV6)
++ if (sk->sk_family == AF_INET6)
++ return tcp_v6_syn_recv_sock(sk, skb, req, dst);
++
++ /* sk->sk_family == AF_INET */
++ if (req->rsk_ops->family == AF_INET6)
++ return mptcp_v6v4_syn_recv_sock(sk, skb, req, dst);
++#endif
++
++ /* sk->sk_family == AF_INET && req->rsk_ops->family == AF_INET */
++ return tcp_v4_syn_recv_sock(sk, skb, req, dst);
++}
++
++struct sock *mptcp_select_ack_sock(const struct sock *meta_sk, int copied)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sock *sk, *subsk = NULL;
++ u32 max_data_seq = 0;
++	/* max_data_seq is initialized only to silence a compiler warning.
++	 * Whether it actually holds a value is tracked by max_data_seq_set.
++ */
++ short max_data_seq_set = 0;
++ u32 min_time = 0xffffffff;
++
++ /* How do we select the subflow to send the window-update on?
++ *
++	 * 1. It has to be in a state where it can send an ack and is
++	 *    operational (pf = 0).
++	 * 2. It has to be one of the subflows that recently
++	 *    contributed to the received stream
++ * (this guarantees a working subflow)
++ * a) its latest data_seq received is after the original
++ * copied_seq.
++ * We select the one with the lowest rtt, so that the
++ * window-update reaches our peer the fastest.
++ * b) if no subflow has this kind of data_seq (e.g., very
++ * strange meta-level retransmissions going on), we take
++	 *       the subflow that last sent the highest data_seq.
++ */
++ mptcp_for_each_sk(meta_tp->mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (!mptcp_sk_can_send_ack(sk) || tp->pf)
++ continue;
++
++ /* Select among those who contributed to the
++ * current receive-queue.
++ */
++ if (copied && after(tp->mptcp->last_data_seq, meta_tp->copied_seq - copied)) {
++ if (tp->srtt < min_time) {
++ min_time = tp->srtt;
++ subsk = sk;
++ max_data_seq_set = 0;
++ }
++ continue;
++ }
++
++ if (!subsk && !max_data_seq_set) {
++ max_data_seq = tp->mptcp->last_data_seq;
++ max_data_seq_set = 1;
++ subsk = sk;
++ }
++
++ /* Otherwise, take the one with the highest data_seq */
++ if ((!subsk || max_data_seq_set) &&
++ after(tp->mptcp->last_data_seq, max_data_seq)) {
++ max_data_seq = tp->mptcp->last_data_seq;
++ subsk = sk;
++ }
++ }
++
++ if (!subsk) {
++ mptcp_debug("%s subsk is null, copied %d, cseq %u\n", __func__,
++ copied, meta_tp->copied_seq);
++ mptcp_for_each_sk(meta_tp->mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++ mptcp_debug("%s pi %d state %u last_dseq %u\n",
++ __func__, tp->mptcp->path_index, sk->sk_state,
++ tp->mptcp->last_data_seq);
++ }
++ }
++
++ return subsk;
++}
++EXPORT_SYMBOL(mptcp_select_ack_sock);
++
++static void mptcp_sock_def_error_report(struct sock *sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(sk)->mpcb;
++
++ if (!sock_flag(sk, SOCK_DEAD))
++ mptcp_sub_close(sk, 0);
++
++ if (mpcb->infinite_mapping_rcv || mpcb->infinite_mapping_snd ||
++ mpcb->send_infinite_mapping) {
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++
++ meta_sk->sk_err = sk->sk_err;
++ meta_sk->sk_err_soft = sk->sk_err_soft;
++
++ if (!sock_flag(meta_sk, SOCK_DEAD))
++ meta_sk->sk_error_report(meta_sk);
++
++ tcp_done(meta_sk);
++ }
++
++ sk->sk_err = 0;
++ return;
++}
++
++static void mptcp_mpcb_put(struct mptcp_cb *mpcb)
++{
++ if (atomic_dec_and_test(&mpcb->mpcb_refcnt)) {
++ mptcp_cleanup_path_manager(mpcb);
++ kmem_cache_free(mptcp_cb_cache, mpcb);
++ }
++}
++
++static void mptcp_sock_destruct(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ inet_sock_destruct(sk);
++
++ BUG_ON(!list_empty(&tp->mptcp->cb_list));
++
++ kmem_cache_free(mptcp_sock_cache, tp->mptcp);
++ tp->mptcp = NULL;
++
++ if (!is_meta_sk(sk) && !tp->was_meta_sk) {
++ /* Taken when mpcb pointer was set */
++ sock_put(mptcp_meta_sk(sk));
++ mptcp_mpcb_put(tp->mpcb);
++ } else {
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct mptcp_tw *mptw;
++
++ /* The mpcb is disappearing - we can make the final
++ * update to the rcv_nxt of the time-wait-sock and remove
++ * its reference to the mpcb.
++ */
++ spin_lock_bh(&mpcb->tw_lock);
++ list_for_each_entry_rcu(mptw, &mpcb->tw_list, list) {
++ list_del_rcu(&mptw->list);
++ mptw->in_list = 0;
++ mptcp_mpcb_put(mpcb);
++ rcu_assign_pointer(mptw->mpcb, NULL);
++ }
++ spin_unlock_bh(&mpcb->tw_lock);
++
++ mptcp_mpcb_put(mpcb);
++
++ mptcp_debug("%s destroying meta-sk\n", __func__);
++ }
++}
++
++void mptcp_destroy_sock(struct sock *sk)
++{
++ if (is_meta_sk(sk)) {
++ struct sock *sk_it, *tmpsk;
++
++ __skb_queue_purge(&tcp_sk(sk)->mpcb->reinject_queue);
++ mptcp_purge_ofo_queue(tcp_sk(sk));
++
++ /* We have to close all remaining subflows. Normally, they
++ * should all be about to get closed. But, if the kernel is
++ * forcing a closure (e.g., tcp_write_err), the subflows might
++ * not have been closed properly (as we are waiting for the
++ * DATA_ACK of the DATA_FIN).
++ */
++ mptcp_for_each_sk_safe(tcp_sk(sk)->mpcb, sk_it, tmpsk) {
++			/* tcp_close has already been called - we are waiting
++			 * for graceful closure, or we are retransmitting the
++			 * fast-close on the subflow. The reset (or timeout)
++			 * will kill the subflow.
++ */
++ if (tcp_sk(sk_it)->closing ||
++ tcp_sk(sk_it)->send_mp_fclose)
++ continue;
++
++ /* Allow the delayed work first to prevent time-wait state */
++ if (delayed_work_pending(&tcp_sk(sk_it)->mptcp->work))
++ continue;
++
++ mptcp_sub_close(sk_it, 0);
++ }
++ } else {
++ mptcp_del_sock(sk);
++ }
++}
++
++static void mptcp_set_state(struct sock *sk)
++{
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++
++ /* Meta is not yet established - wake up the application */
++ if ((1 << meta_sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) &&
++ sk->sk_state == TCP_ESTABLISHED) {
++ tcp_set_state(meta_sk, TCP_ESTABLISHED);
++
++ if (!sock_flag(meta_sk, SOCK_DEAD)) {
++ meta_sk->sk_state_change(meta_sk);
++ sk_wake_async(meta_sk, SOCK_WAKE_IO, POLL_OUT);
++ }
++ }
++
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ tcp_sk(sk)->mptcp->establish_increased = 1;
++ tcp_sk(sk)->mpcb->cnt_established++;
++ }
++}
++
++u32 mptcp_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
++u32 mptcp_key_seed = 0;
++
++void mptcp_key_sha1(u64 key, u32 *token, u64 *idsn)
++{
++ u32 workspace[SHA_WORKSPACE_WORDS];
++ u32 mptcp_hashed_key[SHA_DIGEST_WORDS];
++ u8 input[64];
++ int i;
++
++ memset(workspace, 0, sizeof(workspace));
++
++ /* Initialize input with appropriate padding */
++ memset(&input[9], 0, sizeof(input) - 10); /* -10, because the last byte
++ * is explicitly set too */
++ memcpy(input, &key, sizeof(key)); /* Copy key to the msg beginning */
++ input[8] = 0x80; /* Padding: First bit after message = 1 */
++ input[63] = 0x40; /* Padding: Length of the message = 64 bits */
++
++ sha_init(mptcp_hashed_key);
++ sha_transform(mptcp_hashed_key, input, workspace);
++
++ for (i = 0; i < 5; i++)
++ mptcp_hashed_key[i] = cpu_to_be32(mptcp_hashed_key[i]);
++
++ if (token)
++ *token = mptcp_hashed_key[0];
++ if (idsn)
++ *idsn = *((u64 *)&mptcp_hashed_key[3]);
++}
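For orientation, mptcp_key_sha1() implements the token/IDSN derivation of RFC 6824:
the token is the most-significant 32 bits of SHA-1(key), and the initial data
sequence number is the least-significant 64 bits. A minimal user-space sketch of the
same derivation (illustrative only; it assumes OpenSSL's SHA1() is available and
skips the kernel's byte-order conversions) could look like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>	/* assumed available; build with -lcrypto */

int main(void)
{
	uint64_t key = 0x0123456789abcdefULL;	/* hypothetical 64-bit MPTCP key */
	unsigned char digest[SHA_DIGEST_LENGTH];
	uint32_t token;
	uint64_t idsn;

	/* SHA-1 over the raw 8 key bytes - the same thing the padded
	 * single-block sha_transform() above computes. */
	SHA1((const unsigned char *)&key, sizeof(key), digest);

	/* Token = first 32 bits of the digest, IDSN = last 64 bits. */
	memcpy(&token, digest, sizeof(token));
	memcpy(&idsn, digest + 12, sizeof(idsn));

	printf("token bytes %02x%02x%02x%02x\n",
	       digest[0], digest[1], digest[2], digest[3]);
	return 0;
}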
++
++void mptcp_hmac_sha1(u8 *key_1, u8 *key_2, u8 *rand_1, u8 *rand_2,
++ u32 *hash_out)
++{
++ u32 workspace[SHA_WORKSPACE_WORDS];
++ u8 input[128]; /* 2 512-bit blocks */
++ int i;
++
++ memset(workspace, 0, sizeof(workspace));
++
++ /* Generate key xored with ipad */
++ memset(input, 0x36, 64);
++ for (i = 0; i < 8; i++)
++ input[i] ^= key_1[i];
++ for (i = 0; i < 8; i++)
++ input[i + 8] ^= key_2[i];
++
++ memcpy(&input[64], rand_1, 4);
++ memcpy(&input[68], rand_2, 4);
++ input[72] = 0x80; /* Padding: First bit after message = 1 */
++ memset(&input[73], 0, 53);
++
++ /* Padding: Length of the message = 512 + 64 bits */
++ input[126] = 0x02;
++ input[127] = 0x40;
++
++ sha_init(hash_out);
++ sha_transform(hash_out, input, workspace);
++ memset(workspace, 0, sizeof(workspace));
++
++ sha_transform(hash_out, &input[64], workspace);
++ memset(workspace, 0, sizeof(workspace));
++
++ for (i = 0; i < 5; i++)
++ hash_out[i] = cpu_to_be32(hash_out[i]);
++
++ /* Prepare second part of hmac */
++ memset(input, 0x5C, 64);
++ for (i = 0; i < 8; i++)
++ input[i] ^= key_1[i];
++ for (i = 0; i < 8; i++)
++ input[i + 8] ^= key_2[i];
++
++ memcpy(&input[64], hash_out, 20);
++ input[84] = 0x80;
++ memset(&input[85], 0, 41);
++
++ /* Padding: Length of the message = 512 + 160 bits */
++ input[126] = 0x02;
++ input[127] = 0xA0;
++
++ sha_init(hash_out);
++ sha_transform(hash_out, input, workspace);
++ memset(workspace, 0, sizeof(workspace));
++
++ sha_transform(hash_out, &input[64], workspace);
++
++ for (i = 0; i < 5; i++)
++ hash_out[i] = cpu_to_be32(hash_out[i]);
++}
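The open-coded construction in mptcp_hmac_sha1() is standard HMAC-SHA1: the 16-byte
key key_1||key_2 is zero-padded to the 64-byte block size (the 0x36/0x5C ipad/opad
XORs above), and the message is the 8-byte concatenation rand_1||rand_2. A small
user-space cross-check (illustrative only; it assumes OpenSSL's HMAC() and
EVP_sha1()) would be:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>	/* assumed available; build with -lcrypto */

int main(void)
{
	uint64_t key_1 = 1, key_2 = 2;		/* hypothetical local/remote keys */
	uint32_t rand_1 = 3, rand_2 = 4;	/* hypothetical nonces */
	unsigned char key[16], msg[8], mac[20];
	unsigned int mac_len = 0;

	memcpy(key, &key_1, 8);
	memcpy(key + 8, &key_2, 8);
	memcpy(msg, &rand_1, 4);
	memcpy(msg + 4, &rand_2, 4);

	/* HMAC-SHA1 keyed with key_1||key_2 over rand_1||rand_2 - byte-wise
	 * the same 20-byte MAC that mptcp_hmac_sha1() writes into hash_out. */
	HMAC(EVP_sha1(), key, sizeof(key), msg, sizeof(msg), mac, &mac_len);

	printf("mac[0..3] %02x%02x%02x%02x (len %u)\n",
	       mac[0], mac[1], mac[2], mac[3], mac_len);
	return 0;
}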
++
++static void mptcp_mpcb_inherit_sockopts(struct sock *meta_sk, struct sock *master_sk)
++{
++ /* Socket-options handled by mptcp_inherit_sk while creating the meta-sk.
++ * ======
++ * SO_SNDBUF, SO_SNDBUFFORCE, SO_RCVBUF, SO_RCVBUFFORCE, SO_RCVLOWAT,
++ * SO_RCVTIMEO, SO_SNDTIMEO, SO_ATTACH_FILTER, SO_DETACH_FILTER,
++ * TCP_NODELAY, TCP_CORK
++ *
++ * Socket-options handled in this function here
++ * ======
++ * TCP_DEFER_ACCEPT
++ *
++ * Socket-options on the todo-list
++ * ======
++ * SO_BINDTODEVICE - should probably prevent creation of new subsocks
++ * across other devices. - what about the api-draft?
++ * SO_DEBUG
++ * SO_REUSEADDR - probably we don't care about this
++ * SO_DONTROUTE, SO_BROADCAST
++ * SO_OOBINLINE
++ * SO_LINGER
++ * SO_TIMESTAMP* - I don't think this is of concern for a SOCK_STREAM
++ * SO_PASSSEC - I don't think this is of concern for a SOCK_STREAM
++ * SO_RXQ_OVFL
++ * TCP_COOKIE_TRANSACTIONS
++ * TCP_MAXSEG
++ * TCP_THIN_* - Handled by mptcp_inherit_sk, but we need to support this
++ * in mptcp_retransmit_timer. AND we need to check what is
++ * about the subsockets.
++ * TCP_LINGER2
++ * TCP_WINDOW_CLAMP
++ * TCP_USER_TIMEOUT
++ * TCP_MD5SIG
++ *
++ * Socket-options of no concern for the meta-socket (but for the subsocket)
++ * ======
++ * SO_PRIORITY
++ * SO_MARK
++ * TCP_CONGESTION
++ * TCP_SYNCNT
++ * TCP_QUICKACK
++ * SO_KEEPALIVE
++ */
++
++ /****** DEFER_ACCEPT-handler ******/
++
++ /* DEFER_ACCEPT is not of concern for new subflows - we always accept
++ * them
++ */
++ inet_csk(meta_sk)->icsk_accept_queue.rskq_defer_accept = 0;
++}
++
++static void mptcp_sub_inherit_sockopts(struct sock *meta_sk, struct sock *sub_sk)
++{
++ /* IP_TOS also goes to the subflow. */
++ if (inet_sk(sub_sk)->tos != inet_sk(meta_sk)->tos) {
++ inet_sk(sub_sk)->tos = inet_sk(meta_sk)->tos;
++ sub_sk->sk_priority = meta_sk->sk_priority;
++ sk_dst_reset(sub_sk);
++ }
++
++ /* Inherit SO_REUSEADDR */
++ sub_sk->sk_reuse = meta_sk->sk_reuse;
++
++ /* Inherit snd/rcv-buffer locks */
++ sub_sk->sk_userlocks = meta_sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
++}
++
++int mptcp_backlog_rcv(struct sock *meta_sk, struct sk_buff *skb)
++{
++	/* skb->sk may be NULL if we receive a packet immediately after the
++ * SYN/ACK + MP_CAPABLE.
++ */
++ struct sock *sk = skb->sk ? skb->sk : meta_sk;
++ int ret = 0;
++
++ skb->sk = NULL;
++
++ if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
++ kfree_skb(skb);
++ return 0;
++ }
++
++ if (sk->sk_family == AF_INET)
++ ret = tcp_v4_do_rcv(sk, skb);
++#if IS_ENABLED(CONFIG_IPV6)
++ else
++ ret = tcp_v6_do_rcv(sk, skb);
++#endif
++
++ sock_put(sk);
++ return ret;
++}
++
++struct lock_class_key meta_key;
++struct lock_class_key meta_slock_key;
++
++/* Code heavily inspired from sk_clone() */
++static int mptcp_inherit_sk(const struct sock *sk, struct sock *newsk,
++ int family, const gfp_t flags)
++{
++ struct sk_filter *filter;
++ struct proto *prot = newsk->sk_prot;
++ const struct inet_connection_sock_af_ops *af_ops = inet_csk(newsk)->icsk_af_ops;
++#ifdef CONFIG_SECURITY_NETWORK
++ void *sptr = newsk->sk_security;
++#endif
++
++ if (sk->sk_family == AF_INET) {
++ memcpy(newsk, sk, offsetof(struct sock, sk_dontcopy_begin));
++ memcpy(&newsk->sk_dontcopy_end, &sk->sk_dontcopy_end,
++ sizeof(struct tcp_sock) - offsetof(struct sock, sk_dontcopy_end));
++ } else {
++ memcpy(newsk, sk, offsetof(struct sock, sk_dontcopy_begin));
++ memcpy(&newsk->sk_dontcopy_end, &sk->sk_dontcopy_end,
++ sizeof(struct tcp6_sock) - offsetof(struct sock, sk_dontcopy_end));
++ }
++
++#ifdef CONFIG_SECURITY_NETWORK
++ newsk->sk_security = sptr;
++ security_sk_clone(sk, newsk);
++#endif
++
++ /* Has been changed by sock_copy above - we may need an IPv6-socket */
++ newsk->sk_family = family;
++ newsk->sk_prot = prot;
++ newsk->sk_prot_creator = prot;
++ inet_csk(newsk)->icsk_af_ops = af_ops;
++
++ /* We don't yet have the mptcp-point. Thus we still need inet_sock_destruct */
++ newsk->sk_destruct = inet_sock_destruct;
++
++ /* SANITY */
++ get_net(sock_net(newsk));
++ sk_node_init(&newsk->sk_node);
++ sock_lock_init_class_and_name(newsk, "slock-AF_INET-MPTCP",
++ &meta_slock_key, "sk_lock-AF_INET-MPTCP",
++ &meta_key);
++
++ /* Unlocks are in:
++ *
++ * 1. If we are creating the master-sk
++ * * on client-side in tcp_rcv_state_process, "case TCP_SYN_SENT"
++ * * on server-side in tcp_child_process
++ * 2. If we are creating another subsock
++ * * Also in tcp_child_process
++ */
++ bh_lock_sock(newsk);
++ newsk->sk_backlog.head = NULL;
++ newsk->sk_backlog.tail = NULL;
++ newsk->sk_backlog.len = 0;
++
++ atomic_set(&newsk->sk_rmem_alloc, 0);
++ atomic_set(&newsk->sk_wmem_alloc, 1);
++ atomic_set(&newsk->sk_omem_alloc, 0);
++
++ skb_queue_head_init(&newsk->sk_receive_queue);
++ skb_queue_head_init(&newsk->sk_write_queue);
++#ifdef CONFIG_NET_DMA
++ skb_queue_head_init(&newsk->sk_async_wait_queue);
++#endif
++
++ spin_lock_init(&newsk->sk_dst_lock);
++ rwlock_init(&newsk->sk_callback_lock);
++ lockdep_set_class_and_name(&newsk->sk_callback_lock,
++ af_callback_keys + newsk->sk_family,
++ af_family_clock_key_strings[newsk->sk_family]);
++ newsk->sk_dst_cache = NULL;
++ newsk->sk_rx_dst = NULL;
++ newsk->sk_wmem_queued = 0;
++ newsk->sk_forward_alloc = 0;
++ newsk->sk_send_head = NULL;
++ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
++
++ tcp_sk(newsk)->mptcp = NULL;
++
++ sock_reset_flag(newsk, SOCK_DONE);
++ skb_queue_head_init(&newsk->sk_error_queue);
++
++ filter = rcu_dereference_protected(newsk->sk_filter, 1);
++ if (filter != NULL)
++ sk_filter_charge(newsk, filter);
++
++ if (unlikely(xfrm_sk_clone_policy(newsk))) {
++ /* It is still raw copy of parent, so invalidate
++ * destructor and make plain sk_free()
++ */
++ newsk->sk_destruct = NULL;
++ bh_unlock_sock(newsk);
++ sk_free(newsk);
++ newsk = NULL;
++ return -ENOMEM;
++ }
++
++ newsk->sk_err = 0;
++ newsk->sk_priority = 0;
++ /* Before updating sk_refcnt, we must commit prior changes to memory
++ * (Documentation/RCU/rculist_nulls.txt for details)
++ */
++ smp_wmb();
++ atomic_set(&newsk->sk_refcnt, 2);
++
++ /* Increment the counter in the same struct proto as the master
++ * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
++ * is the same as sk->sk_prot->socks, as this field was copied
++ * with memcpy).
++ *
++ * This _changes_ the previous behaviour, where
++ * tcp_create_openreq_child always was incrementing the
++	 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
++	 * to be taken into account in all callers. -acme
++ */
++ sk_refcnt_debug_inc(newsk);
++ sk_set_socket(newsk, NULL);
++ newsk->sk_wq = NULL;
++
++ if (newsk->sk_prot->sockets_allocated)
++ percpu_counter_inc(newsk->sk_prot->sockets_allocated);
++
++ if (sock_flag(newsk, SOCK_TIMESTAMP) ||
++ sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
++ net_enable_timestamp();
++
++ return 0;
++}
++
++int mptcp_alloc_mpcb(struct sock *meta_sk, __u64 remote_key, u32 window)
++{
++ struct mptcp_cb *mpcb;
++ struct sock *master_sk;
++ struct inet_connection_sock *master_icsk, *meta_icsk = inet_csk(meta_sk);
++ struct tcp_sock *master_tp, *meta_tp = tcp_sk(meta_sk);
++ struct sk_buff *skb, *tmp;
++ u64 idsn;
++
++ master_sk = sk_prot_alloc(meta_sk->sk_prot, GFP_ATOMIC | __GFP_ZERO,
++ meta_sk->sk_family);
++ if (!master_sk)
++ return -ENOBUFS;
++
++ master_tp = tcp_sk(master_sk);
++ master_icsk = inet_csk(master_sk);
++
++ /* Need to set this here - it is needed by mptcp_inherit_sk */
++ master_sk->sk_prot = meta_sk->sk_prot;
++ master_sk->sk_prot_creator = meta_sk->sk_prot;
++ master_icsk->icsk_af_ops = meta_icsk->icsk_af_ops;
++
++ mpcb = kmem_cache_zalloc(mptcp_cb_cache, GFP_ATOMIC);
++ if (!mpcb) {
++ sk_free(master_sk);
++ return -ENOBUFS;
++ }
++
++ /* master_sk inherits from meta_sk */
++ if (mptcp_inherit_sk(meta_sk, master_sk, meta_sk->sk_family, GFP_ATOMIC)) {
++ kmem_cache_free(mptcp_cb_cache, mpcb);
++ return -ENOBUFS;
++ }
++
++#if IS_ENABLED(CONFIG_IPV6)
++ if (meta_icsk->icsk_af_ops == &ipv6_mapped) {
++ struct ipv6_pinfo *newnp, *np = inet6_sk(meta_sk);
++
++ inet_sk(master_sk)->pinet6 = &((struct tcp6_sock *)master_sk)->inet6;
++
++ newnp = inet6_sk(master_sk);
++ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
++
++ newnp->ipv6_mc_list = NULL;
++ newnp->ipv6_ac_list = NULL;
++ newnp->ipv6_fl_list = NULL;
++ newnp->opt = NULL;
++ newnp->pktoptions = NULL;
++ (void)xchg(&newnp->rxpmtu, NULL);
++ } else if (meta_sk->sk_family == AF_INET6) {
++ struct ipv6_pinfo *newnp, *np = inet6_sk(meta_sk);
++
++ inet_sk(master_sk)->pinet6 = &((struct tcp6_sock *)master_sk)->inet6;
++
++ newnp = inet6_sk(master_sk);
++ memcpy(newnp, np, sizeof(struct ipv6_pinfo));
++
++ newnp->hop_limit = -1;
++ newnp->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
++ newnp->mc_loop = 1;
++ newnp->pmtudisc = IPV6_PMTUDISC_WANT;
++ newnp->ipv6only = sock_net(master_sk)->ipv6.sysctl.bindv6only;
++ }
++#endif
++
++ meta_tp->mptcp = kmem_cache_zalloc(mptcp_sock_cache, GFP_ATOMIC);
++ if (!meta_tp->mptcp) {
++ kmem_cache_free(mptcp_cb_cache, mpcb);
++ sk_free(master_sk);
++ return -ENOBUFS;
++ }
++
++ INIT_LIST_HEAD(&meta_tp->mptcp->cb_list);
++
++ /* Store the keys and generate the peer's token */
++ mpcb->mptcp_loc_key = meta_tp->mptcp_loc_key;
++ mpcb->mptcp_loc_token = meta_tp->mptcp_loc_token;
++
++ /* Generate Initial data-sequence-numbers */
++ mptcp_key_sha1(mpcb->mptcp_loc_key, NULL, &idsn);
++ idsn = ntohll(idsn) + 1;
++ mpcb->snd_high_order[0] = idsn >> 32;
++ mpcb->snd_high_order[1] = mpcb->snd_high_order[0] - 1;
++
++ meta_tp->write_seq = (u32)idsn;
++ meta_tp->snd_sml = meta_tp->write_seq;
++ meta_tp->snd_una = meta_tp->write_seq;
++ meta_tp->snd_nxt = meta_tp->write_seq;
++ meta_tp->pushed_seq = meta_tp->write_seq;
++ meta_tp->snd_up = meta_tp->write_seq;
++
++ mpcb->mptcp_rem_key = remote_key;
++ mptcp_key_sha1(mpcb->mptcp_rem_key, &mpcb->mptcp_rem_token, &idsn);
++ idsn = ntohll(idsn) + 1;
++ mpcb->rcv_high_order[0] = idsn >> 32;
++ mpcb->rcv_high_order[1] = mpcb->rcv_high_order[0] + 1;
++ meta_tp->copied_seq = (u32) idsn;
++ meta_tp->rcv_nxt = (u32) idsn;
++ meta_tp->rcv_wup = (u32) idsn;
++
++ meta_tp->snd_wl1 = meta_tp->rcv_nxt - 1;
++ meta_tp->snd_wnd = window;
++ meta_tp->retrans_stamp = 0; /* Set in tcp_connect() */
++
++ meta_tp->packets_out = 0;
++ meta_tp->mptcp->snt_isn = meta_tp->write_seq; /* Initial data-sequence-number */
++ meta_icsk->icsk_probes_out = 0;
++
++ /* Set mptcp-pointers */
++ master_tp->mpcb = mpcb;
++ master_tp->meta_sk = meta_sk;
++ meta_tp->mpcb = mpcb;
++ meta_tp->meta_sk = meta_sk;
++ mpcb->meta_sk = meta_sk;
++ mpcb->master_sk = master_sk;
++
++ set_mpc(meta_tp);
++ meta_tp->mptcp->attached = 0;
++ meta_tp->was_meta_sk = 0;
++
++ /* Initialize the queues */
++ skb_queue_head_init(&mpcb->reinject_queue);
++ skb_queue_head_init(&master_tp->out_of_order_queue);
++ tcp_prequeue_init(master_tp);
++ INIT_LIST_HEAD(&master_tp->tsq_node);
++
++ master_tp->tsq_flags = 0;
++
++ /* Copy the write-queue from the meta down to the master.
++ * This is necessary to get the SYN to the master-write-queue.
++	 * No other data can be queued yet, because tcp_sendmsg waits for the
++	 * connection setup to finish.
++ */
++ skb_queue_walk_safe(&meta_sk->sk_write_queue, skb, tmp) {
++ skb_unlink(skb, &meta_sk->sk_write_queue);
++ skb_queue_tail(&master_sk->sk_write_queue, skb);
++
++ master_sk->sk_wmem_queued += skb->truesize;
++ sk_mem_charge(master_sk, skb->truesize);
++ }
++
++ meta_sk->sk_wmem_queued = 0;
++ meta_sk->sk_forward_alloc = 0;
++
++ mutex_init(&mpcb->mpcb_mutex);
++
++	/* Init the accept_queue structure. We support a queue of 32 pending
++	 * connections; it does not need to be huge, since we only store
++	 * pending subflow creations here.
++ */
++ if (reqsk_queue_alloc(&meta_icsk->icsk_accept_queue, 32, GFP_ATOMIC)) {
++ inet_put_port(master_sk);
++ kmem_cache_free(mptcp_sock_cache, meta_tp->mptcp);
++ kmem_cache_free(mptcp_cb_cache, mpcb);
++ sk_free(master_sk);
++ reset_mpc(meta_tp);
++ return -ENOMEM;
++ }
++
++ /* Redefine function-pointers as the meta-sk is now fully ready */
++ meta_sk->sk_backlog_rcv = mptcp_backlog_rcv;
++ meta_sk->sk_destruct = mptcp_sock_destruct;
++ mpcb->syn_recv_sock = mptcp_syn_recv_sock;
++
++ /* Meta-level retransmit timer */
++ meta_icsk->icsk_rto *= 2; /* Double of initial - rto */
++
++ tcp_init_xmit_timers(master_sk);
++ /* Has been set for sending out the SYN */
++ inet_csk_clear_xmit_timer(meta_sk, ICSK_TIME_RETRANS);
++
++ if (!meta_tp->inside_tk_table) {
++ /* Adding the meta_tp in the token hashtable - coming from server-side */
++ rcu_read_lock();
++ spin_lock(&mptcp_tk_hashlock);
++
++ __mptcp_hash_insert(meta_tp, mpcb->mptcp_loc_token);
++
++ spin_unlock(&mptcp_tk_hashlock);
++ rcu_read_unlock();
++ }
++ master_tp->inside_tk_table = 0;
++
++ /* Init time-wait stuff */
++ INIT_LIST_HEAD(&mpcb->tw_list);
++ spin_lock_init(&mpcb->tw_lock);
++
++ INIT_LIST_HEAD(&mpcb->callback_list);
++
++ mptcp_mpcb_inherit_sockopts(meta_sk, master_sk);
++
++ mpcb->orig_sk_rcvbuf = meta_sk->sk_rcvbuf;
++ mpcb->orig_sk_sndbuf = meta_sk->sk_sndbuf;
++ mpcb->orig_window_clamp = meta_tp->window_clamp;
++
++ /* The meta is directly linked - set refcnt to 1 */
++ atomic_set(&mpcb->mpcb_refcnt, 1);
++
++ mptcp_init_path_manager(mpcb);
++
++ mptcp_debug("%s: created mpcb with token %#x\n",
++ __func__, mpcb->mptcp_loc_token);
++
++ return 0;
++}
++
++struct sock *mptcp_sk_clone(const struct sock *sk, int family,
++ const gfp_t priority)
++{
++ struct sock *newsk = NULL;
++
++ if (family == AF_INET && sk->sk_family == AF_INET) {
++ newsk = sk_prot_alloc(&tcp_prot, priority, family);
++ if (!newsk)
++ return NULL;
++
++ /* Set these pointers - they are needed by mptcp_inherit_sk */
++ newsk->sk_prot = &tcp_prot;
++ newsk->sk_prot_creator = &tcp_prot;
++ inet_csk(newsk)->icsk_af_ops = &ipv4_specific;
++ newsk->sk_family = AF_INET;
++ }
++#if IS_ENABLED(CONFIG_IPV6)
++ else {
++ newsk = sk_prot_alloc(&tcpv6_prot, priority, family);
++ if (!newsk)
++ return NULL;
++
++ newsk->sk_prot = &tcpv6_prot;
++ newsk->sk_prot_creator = &tcpv6_prot;
++ if (family == AF_INET)
++ inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
++ else
++ inet_csk(newsk)->icsk_af_ops = &ipv6_specific;
++ newsk->sk_family = AF_INET6;
++ }
++#endif
++
++ if (mptcp_inherit_sk(sk, newsk, family, priority))
++ return NULL;
++
++ return newsk;
++}
++
++void mptcp_fallback_meta_sk(struct sock *meta_sk)
++{
++ kfree(inet_csk(meta_sk)->icsk_accept_queue.listen_opt);
++ kmem_cache_free(mptcp_sock_cache, tcp_sk(meta_sk)->mptcp);
++ kmem_cache_free(mptcp_cb_cache, tcp_sk(meta_sk)->mpcb);
++}
++
++int mptcp_add_sock(struct sock *meta_sk, struct sock *sk, u8 loc_id, u8 rem_id,
++ gfp_t flags)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ tp->mptcp = kmem_cache_zalloc(mptcp_sock_cache, flags);
++ if (!tp->mptcp)
++ return -ENOMEM;
++
++ tp->mptcp->path_index = mptcp_set_new_pathindex(mpcb);
++ /* No more space for more subflows? */
++ if (!tp->mptcp->path_index) {
++ kmem_cache_free(mptcp_sock_cache, tp->mptcp);
++ return -EPERM;
++ }
++
++ INIT_LIST_HEAD(&tp->mptcp->cb_list);
++
++ tp->mptcp->tp = tp;
++ tp->mpcb = mpcb;
++ tp->meta_sk = meta_sk;
++ set_mpc(tp);
++ tp->mptcp->loc_id = loc_id;
++ tp->mptcp->rem_id = rem_id;
++ tp->mptcp->last_rbuf_opti = tcp_time_stamp;
++
++ /* The corresponding sock_put is in mptcp_sock_destruct(). It cannot be
++ * included in mptcp_del_sock(), because the mpcb must remain alive
++ * until the last subsocket is completely destroyed.
++ */
++ sock_hold(meta_sk);
++ atomic_inc(&mpcb->mpcb_refcnt);
++
++ tp->mptcp->next = mpcb->connection_list;
++ mpcb->connection_list = tp;
++ tp->mptcp->attached = 1;
++
++ mpcb->cnt_subflows++;
++ atomic_add(atomic_read(&((struct sock *)tp)->sk_rmem_alloc),
++ &meta_sk->sk_rmem_alloc);
++
++ mptcp_sub_inherit_sockopts(meta_sk, sk);
++ INIT_DELAYED_WORK(&tp->mptcp->work, mptcp_sub_close_wq);
++
++ /* As we successfully allocated the mptcp_tcp_sock, we have to
++ * change the function-pointers here (for sk_destruct to work correctly)
++ */
++ sk->sk_error_report = mptcp_sock_def_error_report;
++ sk->sk_data_ready = mptcp_data_ready;
++ sk->sk_write_space = mptcp_write_space;
++ sk->sk_state_change = mptcp_set_state;
++ sk->sk_destruct = mptcp_sock_destruct;
++
++ if (sk->sk_family == AF_INET)
++ mptcp_debug("%s: token %#x pi %d, src_addr:%pI4:%d dst_addr:%pI4:%d, cnt_subflows now %d\n",
++ __func__ , mpcb->mptcp_loc_token,
++ tp->mptcp->path_index,
++ &((struct inet_sock *)tp)->inet_saddr,
++ ntohs(((struct inet_sock *)tp)->inet_sport),
++ &((struct inet_sock *)tp)->inet_daddr,
++ ntohs(((struct inet_sock *)tp)->inet_dport),
++ mpcb->cnt_subflows);
++#if IS_ENABLED(CONFIG_IPV6)
++ else
++ mptcp_debug("%s: token %#x pi %d, src_addr:%pI6:%d dst_addr:%pI6:%d, cnt_subflows now %d\n",
++ __func__ , mpcb->mptcp_loc_token,
++ tp->mptcp->path_index, &inet6_sk(sk)->saddr,
++ ntohs(((struct inet_sock *)tp)->inet_sport),
++ &sk->sk_v6_daddr,
++ ntohs(((struct inet_sock *)tp)->inet_dport),
++ mpcb->cnt_subflows);
++#endif
++
++ return 0;
++}
++
++void mptcp_del_sock(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk), *tp_prev;
++ struct mptcp_cb *mpcb;
++
++ if (!tp->mptcp || !tp->mptcp->attached)
++ return;
++
++ mpcb = tp->mpcb;
++ tp_prev = mpcb->connection_list;
++
++ mptcp_debug("%s: Removing subsock tok %#x pi:%d state %d is_meta? %d\n",
++ __func__, mpcb->mptcp_loc_token, tp->mptcp->path_index,
++ sk->sk_state, is_meta_sk(sk));
++
++ if (tp_prev == tp) {
++ mpcb->connection_list = tp->mptcp->next;
++ } else {
++ for (; tp_prev && tp_prev->mptcp->next; tp_prev = tp_prev->mptcp->next) {
++ if (tp_prev->mptcp->next == tp) {
++ tp_prev->mptcp->next = tp->mptcp->next;
++ break;
++ }
++ }
++ }
++ mpcb->cnt_subflows--;
++ if (tp->mptcp->establish_increased)
++ mpcb->cnt_established--;
++
++ tp->mptcp->next = NULL;
++ tp->mptcp->attached = 0;
++ mpcb->path_index_bits &= ~(1 << tp->mptcp->path_index);
++
++ if (!skb_queue_empty(&sk->sk_write_queue))
++ mptcp_reinject_data(sk, 0);
++
++ if (is_master_tp(tp))
++ mpcb->master_sk = NULL;
++ else if (tp->mptcp->pre_established)
++ sk_stop_timer(sk, &tp->mptcp->mptcp_ack_timer);
++
++ rcu_assign_pointer(inet_sk(sk)->inet_opt, NULL);
++}
++
++/* Updates the metasocket ULID/port data, based on the given sock.
++ * The argument sock must be the sock accessible to the application.
++ * In this function, we update the meta socket info, based on the changes
++ * in the application socket (bind, address allocation, ...)
++ */
++void mptcp_update_metasocket(struct sock *sk, struct sock *meta_sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ union inet_addr addr;
++ int index;
++
++ /* Get the index of the local address */
++ if (sk->sk_family == AF_INET || mptcp_v6_is_v4_mapped(sk)) {
++ addr.ip = inet_sk(sk)->inet_saddr;
++ index = mpcb->pm_ops->get_local_index(AF_INET, &addr, sock_net(meta_sk));
++ } else {
++ addr.in6 = inet6_sk(sk)->saddr;
++ index = mpcb->pm_ops->get_local_index(AF_INET6, &addr, sock_net(meta_sk));
++ }
++
++ if (sk->sk_family == AF_INET || mptcp_v6_is_v4_mapped(sk)) {
++ mptcp_v4_add_raddress(mpcb,
++ (struct in_addr *)&inet_sk(sk)->inet_daddr,
++ 0, 0);
++ if (index >= 0)
++ mptcp_v4_set_init_addr_bit(mpcb, inet_sk(sk)->inet_daddr, index);
++ } else {
++#if IS_ENABLED(CONFIG_IPV6)
++ mptcp_v6_add_raddress(mpcb, &sk->sk_v6_daddr, 0, 0);
++ if (index >= 0)
++ mptcp_v6_set_init_addr_bit(mpcb, &sk->sk_v6_daddr, index);
++#endif
++ }
++
++ if (mpcb->pm_ops->new_session)
++ mpcb->pm_ops->new_session(meta_sk, index);
++
++ tcp_sk(sk)->mptcp->send_mp_prio = tcp_sk(sk)->mptcp->low_prio;
++}
++
++/* Clean up the receive buffer for full frames taken by the user,
++ * then send an ACK if necessary. COPIED is the number of bytes
++ * tcp_recvmsg has given to the user so far, it speeds up the
++ * calculation of whether or not we must ACK for the sake of
++ * a window update.
++ */
++void mptcp_cleanup_rbuf(struct sock *meta_sk, int copied)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sock *sk;
++ __u32 rcv_window_now = 0;
++
++ if (copied > 0 && !(meta_sk->sk_shutdown & RCV_SHUTDOWN)) {
++ rcv_window_now = tcp_receive_window(meta_tp);
++
++ if (2 * rcv_window_now > meta_tp->window_clamp)
++ rcv_window_now = 0;
++ }
++
++ mptcp_for_each_sk(meta_tp->mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++ const struct inet_connection_sock *icsk = inet_csk(sk);
++
++ if (!mptcp_sk_can_send_ack(sk))
++ continue;
++
++ if (!inet_csk_ack_scheduled(sk))
++ goto second_part;
++ /* Delayed ACKs frequently hit locked sockets during bulk
++ * receive.
++ */
++ if (icsk->icsk_ack.blocked ||
++ /* Once-per-two-segments ACK was not sent by tcp_input.c */
++ tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
++ /* If this read emptied read buffer, we send ACK, if
++ * connection is not bidirectional, user drained
++ * receive buffer and there was a small segment
++ * in queue.
++ */
++ (copied > 0 &&
++ ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
++ ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
++ !icsk->icsk_ack.pingpong)) &&
++ !atomic_read(&meta_sk->sk_rmem_alloc))) {
++ tcp_send_ack(sk);
++ continue;
++ }
++
++second_part:
++ /* This here is the second part of tcp_cleanup_rbuf */
++ if (rcv_window_now) {
++ __u32 new_window = tp->__select_window(sk);
++
++ /* Send ACK now, if this read freed lots of space
++ * in our buffer. Certainly, new_window is new window.
++ * We can advertise it now, if it is not less than
++ * current one.
++ * "Lots" means "at least twice" here.
++ */
++ if (new_window && new_window >= 2 * rcv_window_now)
++ tcp_send_ack(sk);
++ }
++ }
++}
++
++static int mptcp_sub_send_fin(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sk_buff *skb = tcp_write_queue_tail(sk);
++ int mss_now;
++
++ /* Optimization, tack on the FIN if we have a queue of
++ * unsent frames. But be careful about outgoing SACKS
++ * and IP options.
++ */
++ mss_now = tcp_current_mss(sk);
++
++ if (tcp_send_head(sk) != NULL) {
++ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
++ TCP_SKB_CB(skb)->end_seq++;
++ tp->write_seq++;
++ } else {
++ skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_ATOMIC);
++ if (!skb)
++ return 1;
++
++ /* Reserve space for headers and prepare control bits. */
++ skb_reserve(skb, MAX_TCP_HEADER);
++ /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
++ tcp_init_nondata_skb(skb, tp->write_seq,
++ TCPHDR_ACK | TCPHDR_FIN);
++ tcp_queue_skb(sk, skb);
++ }
++ __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
++
++ return 0;
++}
++
++void mptcp_sub_close_wq(struct work_struct *work)
++{
++ struct mptcp_tcp_sock *mptcp = container_of(work, struct mptcp_tcp_sock, work.work);
++ struct tcp_sock *tp = mptcp->tp;
++ struct sock *sk = (struct sock *)tp;
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++
++ mutex_lock(&tp->mpcb->mpcb_mutex);
++ lock_sock_nested(meta_sk, SINGLE_DEPTH_NESTING);
++
++ if (sock_flag(sk, SOCK_DEAD))
++ goto exit;
++
++ /* We come from tcp_disconnect. We are sure that meta_sk is set */
++ if (!tp->mpc) {
++ tp->closing = 1;
++ sock_rps_reset_flow(sk);
++ tcp_close(sk, 0);
++ goto exit;
++ }
++
++ if (meta_sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) {
++ tp->closing = 1;
++ sock_rps_reset_flow(sk);
++ tcp_close(sk, 0);
++ } else if (tcp_close_state(sk)) {
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ tcp_send_fin(sk);
++ }
++
++exit:
++ release_sock(meta_sk);
++ mutex_unlock(&tp->mpcb->mpcb_mutex);
++ sock_put(sk);
++}
++
++void mptcp_sub_close(struct sock *sk, unsigned long delay)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct delayed_work *work = &tcp_sk(sk)->mptcp->work;
++
++ /* We are already closing - e.g., call from sock_def_error_report upon
++ * tcp_disconnect in tcp_close.
++ */
++ if (tp->closing)
++ return;
++
++ /* Work already scheduled ? */
++ if (work_pending(&work->work)) {
++ /* Work present - who will be first ? */
++ if (jiffies + delay > work->timer.expires)
++ return;
++
++ /* Try canceling - if it fails, work will be executed soon */
++ if (!cancel_delayed_work(work))
++ return;
++ sock_put(sk);
++ }
++
++ if (!delay) {
++ unsigned char old_state = sk->sk_state;
++
++ /* If we are in user-context we can directly do the closing
++ * procedure. No need to schedule a work-queue.
++ */
++ if (!in_softirq()) {
++ if (sock_flag(sk, SOCK_DEAD))
++ return;
++
++ if (!tp->mpc) {
++ tp->closing = 1;
++ sock_rps_reset_flow(sk);
++ tcp_close(sk, 0);
++ return;
++ }
++
++ if (mptcp_meta_sk(sk)->sk_shutdown == SHUTDOWN_MASK ||
++ sk->sk_state == TCP_CLOSE) {
++ tp->closing = 1;
++ sock_rps_reset_flow(sk);
++ tcp_close(sk, 0);
++ } else if (tcp_close_state(sk)) {
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ tcp_send_fin(sk);
++ }
++
++ return;
++ }
++
++		/* We send the FIN directly, because it may take a long time
++		 * until the work-queue gets scheduled...
++ *
++ * If mptcp_sub_send_fin returns 1, it failed and thus we reset
++ * the old state so that tcp_close will finally send the fin
++ * in user-context.
++ */
++ if (!sk->sk_err && old_state != TCP_CLOSE &&
++ tcp_close_state(sk) && mptcp_sub_send_fin(sk)) {
++ if (old_state == TCP_ESTABLISHED)
++ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
++ sk->sk_state = old_state;
++ }
++ }
++
++ sock_hold(sk);
++ queue_delayed_work(mptcp_wq, work, delay);
++}
++
++void mptcp_sub_force_close(struct sock *sk)
++{
++	/* The below tcp_done may have freed the socket, if it is already dead.
++ * Thus, we are not allowed to access it afterwards. That's why
++ * we have to store the dead-state in this local variable.
++ */
++ int sock_is_dead = sock_flag(sk, SOCK_DEAD);
++
++ tcp_sk(sk)->mp_killed = 1;
++
++ if (sk->sk_state != TCP_CLOSE)
++ tcp_done(sk);
++
++ if (!sock_is_dead)
++ mptcp_sub_close(sk, 0);
++}
++EXPORT_SYMBOL(mptcp_sub_force_close);
++
++/* Update the mpcb send buffer, based on the contributions
++ * of each subflow
++ */
++void mptcp_update_sndbuf(struct mptcp_cb *mpcb)
++{
++ struct sock *meta_sk = mpcb->meta_sk, *sk;
++ int new_sndbuf = 0, old_sndbuf = meta_sk->sk_sndbuf;
++ mptcp_for_each_sk(mpcb, sk) {
++ if (!mptcp_sk_can_send(sk))
++ continue;
++
++ new_sndbuf += sk->sk_sndbuf;
++
++ if (new_sndbuf > sysctl_tcp_wmem[2] || new_sndbuf < 0) {
++ new_sndbuf = sysctl_tcp_wmem[2];
++ break;
++ }
++ }
++ meta_sk->sk_sndbuf = max(min(new_sndbuf, sysctl_tcp_wmem[2]), meta_sk->sk_sndbuf);
++
++ /* The subflow's call to sk_write_space in tcp_new_space ends up in
++ * mptcp_write_space.
++ * It has nothing to do with waking up the application.
++ * So, we do it here.
++ */
++ if (old_sndbuf != meta_sk->sk_sndbuf)
++ meta_sk->sk_write_space(meta_sk);
++}
++
++void mptcp_close(struct sock *meta_sk, long timeout)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sock *sk_it, *tmpsk;
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sk_buff *skb;
++ int data_was_unread = 0;
++ int state;
++
++ mptcp_debug("%s: Close of meta_sk with tok %#x\n",
++ __func__, mpcb->mptcp_loc_token);
++
++ mutex_lock(&mpcb->mpcb_mutex);
++ lock_sock(meta_sk);
++
++ if (meta_tp->inside_tk_table) {
++ /* Detach the mpcb from the token hashtable */
++ mptcp_hash_remove_bh(meta_tp);
++ reqsk_queue_destroy(&inet_csk(meta_sk)->icsk_accept_queue);
++ }
++
++ meta_sk->sk_shutdown = SHUTDOWN_MASK;
++ /* We need to flush the recv. buffs. We do this only on the
++ * descriptor close, not protocol-sourced closes, because the
++ * reader process may not have drained the data yet!
++ */
++ while ((skb = __skb_dequeue(&meta_sk->sk_receive_queue)) != NULL) {
++ u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
++ tcp_hdr(skb)->fin;
++ data_was_unread += len;
++ __kfree_skb(skb);
++ }
++
++ sk_mem_reclaim(meta_sk);
++
++ /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
++ if (meta_sk->sk_state == TCP_CLOSE) {
++ mptcp_for_each_sk_safe(mpcb, sk_it, tmpsk) {
++ if (tcp_sk(sk_it)->send_mp_fclose)
++ continue;
++ mptcp_sub_close(sk_it, 0);
++ }
++ goto adjudge_to_death;
++ }
++
++ if (data_was_unread) {
++ /* Unread data was tossed, zap the connection. */
++ NET_INC_STATS_USER(sock_net(meta_sk), LINUX_MIB_TCPABORTONCLOSE);
++ tcp_set_state(meta_sk, TCP_CLOSE);
++ tcp_send_active_reset(meta_sk, meta_sk->sk_allocation);
++ } else if (sock_flag(meta_sk, SOCK_LINGER) && !meta_sk->sk_lingertime) {
++ /* Check zero linger _after_ checking for unread data. */
++ meta_sk->sk_prot->disconnect(meta_sk, 0);
++ NET_INC_STATS_USER(sock_net(meta_sk), LINUX_MIB_TCPABORTONDATA);
++ } else if (tcp_close_state(meta_sk)) {
++ mptcp_send_fin(meta_sk);
++ } else if (meta_tp->snd_una == meta_tp->write_seq) {
++ /* The DATA_FIN has been sent and acknowledged
++ * (e.g., by sk_shutdown). Close all the other subflows
++ */
++ mptcp_for_each_sk_safe(mpcb, sk_it, tmpsk) {
++ unsigned long delay = 0;
++ /* If we are the passive closer, don't trigger
++			 * the subflow-FIN until the subflow has received a
++			 * FIN from the peer - thus we add a delay
++ */
++ if (mpcb->passive_close &&
++ sk_it->sk_state == TCP_ESTABLISHED)
++ delay = inet_csk(sk_it)->icsk_rto << 3;
++
++ mptcp_sub_close(sk_it, delay);
++ }
++ }
++
++ sk_stream_wait_close(meta_sk, timeout);
++
++adjudge_to_death:
++ state = meta_sk->sk_state;
++ sock_hold(meta_sk);
++ sock_orphan(meta_sk);
++
++ /* socket will be freed after mptcp_close - we have to prevent
++ * access from the subflows.
++ */
++ mptcp_for_each_sk(mpcb, sk_it) {
++ /* Similar to sock_orphan, but we don't set it DEAD, because
++ * the callbacks are still set and must be called.
++ */
++ write_lock_bh(&sk_it->sk_callback_lock);
++ sk_set_socket(sk_it, NULL);
++ sk_it->sk_wq = NULL;
++ write_unlock_bh(&sk_it->sk_callback_lock);
++ }
++
++ /* It is the last release_sock in its life. It will remove backlog. */
++ release_sock(meta_sk);
++
++ /* Now socket is owned by kernel and we acquire BH lock
++ * to finish close. No need to check for user refs.
++ */
++ local_bh_disable();
++ bh_lock_sock(meta_sk);
++ WARN_ON(sock_owned_by_user(meta_sk));
++
++ percpu_counter_inc(meta_sk->sk_prot->orphan_count);
++
++ /* Have we already been destroyed by a softirq or backlog? */
++ if (state != TCP_CLOSE && meta_sk->sk_state == TCP_CLOSE)
++ goto out;
++
++	/* This is a (useful) BSD violation of the RFC. There is a
++ * problem with TCP as specified in that the other end could
++ * keep a socket open forever with no application left this end.
++ * We use a 3 minute timeout (about the same as BSD) then kill
++ * our end. If they send after that then tough - BUT: long enough
++ * that we won't make the old 4*rto = almost no time - whoops
++ * reset mistake.
++ *
++ * Nope, it was not mistake. It is really desired behaviour
++ * f.e. on http servers, when such sockets are useless, but
++ * consume significant resources. Let's do it with special
++ * linger2 option. --ANK
++ */
++
++ if (meta_sk->sk_state == TCP_FIN_WAIT2) {
++ if (meta_tp->linger2 < 0) {
++ tcp_set_state(meta_sk, TCP_CLOSE);
++ tcp_send_active_reset(meta_sk, GFP_ATOMIC);
++ NET_INC_STATS_BH(sock_net(meta_sk),
++ LINUX_MIB_TCPABORTONLINGER);
++ } else {
++ const int tmo = tcp_fin_time(meta_sk);
++
++ if (tmo > TCP_TIMEWAIT_LEN) {
++ inet_csk_reset_keepalive_timer(meta_sk,
++ tmo - TCP_TIMEWAIT_LEN);
++ } else {
++ tcp_time_wait(meta_sk, TCP_FIN_WAIT2, tmo);
++ goto out;
++ }
++ }
++ }
++ if (meta_sk->sk_state != TCP_CLOSE) {
++ sk_mem_reclaim(meta_sk);
++ if (tcp_too_many_orphans(meta_sk, 0)) {
++ if (net_ratelimit())
++				pr_info("MPTCP: too many orphaned sockets\n");
++ tcp_set_state(meta_sk, TCP_CLOSE);
++ tcp_send_active_reset(meta_sk, GFP_ATOMIC);
++ NET_INC_STATS_BH(sock_net(meta_sk),
++ LINUX_MIB_TCPABORTONMEMORY);
++ }
++ }
++
++
++ if (meta_sk->sk_state == TCP_CLOSE)
++ inet_csk_destroy_sock(meta_sk);
++ /* Otherwise, socket is reprieved until protocol close. */
++
++out:
++ bh_unlock_sock(meta_sk);
++ local_bh_enable();
++ mutex_unlock(&mpcb->mpcb_mutex);
++ sock_put(meta_sk); /* Taken by sock_hold */
++}
++
++void mptcp_disconnect(struct sock *sk)
++{
++ struct sock *subsk, *tmpsk;
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ __skb_queue_purge(&tp->mpcb->reinject_queue);
++
++ if (tp->inside_tk_table) {
++ mptcp_hash_remove_bh(tp);
++ reqsk_queue_destroy(&inet_csk(tp->meta_sk)->icsk_accept_queue);
++ }
++
++ local_bh_disable();
++ mptcp_for_each_sk_safe(tp->mpcb, subsk, tmpsk) {
++ /* The socket will get removed from the subsocket-list
++ * and made non-mptcp by setting mpc to 0.
++ *
++ * This is necessary, because tcp_disconnect assumes
++		 * that the connection is completely dead afterwards.
++ * Thus we need to do a mptcp_del_sock. Due to this call
++ * we have to make it non-mptcp.
++ *
++ * We have to lock the socket, because we set mpc to 0.
++ * An incoming packet would take the subsocket's lock
++ * and go on into the receive-path.
++ * This would be a race.
++ */
++
++ bh_lock_sock(subsk);
++ mptcp_del_sock(subsk);
++ reset_mpc(tcp_sk(subsk));
++ mptcp_sub_force_close(subsk);
++ bh_unlock_sock(subsk);
++ }
++ local_bh_enable();
++
++ tp->was_meta_sk = 1;
++ reset_mpc(tp);
++}
++
++
++/* Returns 1 if we should enable MPTCP for that socket. */
++int mptcp_doit(struct sock *sk)
++{
++ /* Do not allow MPTCP enabling if the MPTCP initialization failed */
++ if (mptcp_init_failed)
++ return 0;
++
++ if (sysctl_mptcp_enabled == MPTCP_APP && !tcp_sk(sk)->mptcp_enabled)
++ return 0;
++
++ /* Socket may already be established (e.g., called from tcp_recvmsg) */
++ if (tcp_sk(sk)->mpc || tcp_sk(sk)->request_mptcp)
++ return 1;
++
++ /* Don't do mptcp over loopback */
++ if (sk->sk_family == AF_INET &&
++ (ipv4_is_loopback(inet_sk(sk)->inet_daddr) ||
++ ipv4_is_loopback(inet_sk(sk)->inet_saddr)))
++ return 0;
++#if IS_ENABLED(CONFIG_IPV6)
++ if (sk->sk_family == AF_INET6 &&
++ (ipv6_addr_loopback(&sk->sk_v6_daddr) ||
++ ipv6_addr_loopback(&inet6_sk(sk)->saddr)))
++ return 0;
++#endif
++ if (mptcp_v6_is_v4_mapped(sk) &&
++ ipv4_is_loopback(inet_sk(sk)->inet_saddr))
++ return 0;
++
++#ifdef CONFIG_TCP_MD5SIG
++ /* If TCP_MD5SIG is enabled, do not do MPTCP - there is no Option-Space */
++ if (tcp_sk(sk)->af_specific->md5_lookup(sk, sk))
++ return 0;
++#endif
++
++ return 1;
++}
++
++int mptcp_create_master_sk(struct sock *meta_sk, __u64 remote_key, u32 window)
++{
++ struct tcp_sock *master_tp;
++ struct sock *master_sk;
++
++ if (mptcp_alloc_mpcb(meta_sk, remote_key, window))
++ goto err_alloc_mpcb;
++
++ master_sk = tcp_sk(meta_sk)->mpcb->master_sk;
++ master_tp = tcp_sk(master_sk);
++
++ if (mptcp_add_sock(meta_sk, master_sk, 0, 0, GFP_ATOMIC))
++ goto err_add_sock;
++
++ if (__inet_inherit_port(meta_sk, master_sk) < 0)
++ goto err_add_sock;
++
++ meta_sk->sk_prot->unhash(meta_sk);
++
++ if (master_sk->sk_family == AF_INET || mptcp_v6_is_v4_mapped(master_sk))
++ __inet_hash_nolisten(master_sk, NULL);
++#if IS_ENABLED(CONFIG_IPV6)
++ else
++ __inet6_hash(master_sk, NULL);
++#endif
++
++ master_tp->mptcp->init_rcv_wnd = master_tp->rcv_wnd;
++
++ return 0;
++
++err_add_sock:
++ mptcp_fallback_meta_sk(meta_sk);
++
++ inet_csk_prepare_forced_close(master_sk);
++ tcp_done(master_sk);
++ inet_csk_prepare_forced_close(meta_sk);
++ tcp_done(meta_sk);
++
++err_alloc_mpcb:
++ return -ENOBUFS;
++}
++
++int mptcp_check_req_master(struct sock *sk, struct sock *child,
++ struct request_sock *req,
++ struct request_sock **prev,
++ struct mptcp_options_received *mopt)
++{
++ struct tcp_sock *child_tp = tcp_sk(child);
++ struct sock *meta_sk = child;
++ struct mptcp_cb *mpcb;
++ struct mptcp_request_sock *mtreq;
++
++ if (!tcp_rsk(req)->saw_mpc)
++ return 1;
++
++	/* Just set these values to pass them to mptcp_alloc_mpcb */
++ mtreq = mptcp_rsk(req);
++ child_tp->mptcp_loc_key = mtreq->mptcp_loc_key;
++ child_tp->mptcp_loc_token = mtreq->mptcp_loc_token;
++
++ if (mptcp_create_master_sk(meta_sk, mtreq->mptcp_rem_key,
++ child_tp->snd_wnd))
++ return -ENOBUFS;
++
++ child = tcp_sk(child)->mpcb->master_sk;
++ child_tp = tcp_sk(child);
++ mpcb = child_tp->mpcb;
++
++ child_tp->mptcp->snt_isn = tcp_rsk(req)->snt_isn;
++ child_tp->mptcp->rcv_isn = tcp_rsk(req)->rcv_isn;
++
++ mpcb->dss_csum = mtreq->dss_csum;
++ mpcb->server_side = 1;
++
++ /* Will be moved to ESTABLISHED by tcp_rcv_state_process() */
++ mptcp_update_metasocket(child, meta_sk);
++
++ /* Needs to be done here additionally, because when accepting a
++ * new connection we pass by __reqsk_free and not reqsk_free.
++ */
++ mptcp_reqsk_remove_tk(req);
++
++ /* Hold when creating the meta-sk in tcp_vX_syn_recv_sock. */
++ sock_put(meta_sk);
++
++ inet_csk_reqsk_queue_unlink(sk, req, prev);
++ inet_csk_reqsk_queue_removed(sk, req);
++ inet_csk_reqsk_queue_add(sk, req, meta_sk);
++
++ return 0;
++}
++
++struct sock *mptcp_check_req_child(struct sock *meta_sk, struct sock *child,
++ struct request_sock *req,
++ struct request_sock **prev,
++ struct mptcp_options_received *mopt)
++{
++ struct tcp_sock *child_tp = tcp_sk(child);
++ struct mptcp_request_sock *mtreq = mptcp_rsk(req);
++ struct mptcp_cb *mpcb = mtreq->mpcb;
++ u8 hash_mac_check[20];
++
++ child_tp->inside_tk_table = 0;
++
++ if (!mopt->join_ack)
++ goto teardown;
++
++ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_rem_key,
++ (u8 *)&mpcb->mptcp_loc_key,
++ (u8 *)&mtreq->mptcp_rem_nonce,
++ (u8 *)&mtreq->mptcp_loc_nonce,
++ (u32 *)hash_mac_check);
++
++ if (memcmp(hash_mac_check, (char *)&mopt->mptcp_recv_mac, 20))
++ goto teardown;
++
++ /* Point it to the same struct socket and wq as the meta_sk */
++ sk_set_socket(child, meta_sk->sk_socket);
++ child->sk_wq = meta_sk->sk_wq;
++
++ if (mptcp_add_sock(meta_sk, child, mtreq->loc_id, mtreq->rem_id, GFP_ATOMIC)) {
++ reset_mpc(child_tp); /* Has been inherited, but now
++ * child_tp->mptcp is NULL
++ */
++ /* TODO when we support acking the third ack for new subflows,
++ * we should silently discard this third ack, by returning NULL.
++ *
++ * Maybe, at the retransmission we will have enough memory to
++ * fully add the socket to the meta-sk.
++ */
++ goto teardown;
++ }
++
++ /* The child is a clone of the meta socket, we must now reset
++ * some of the fields
++ */
++ child_tp->mptcp->rcv_low_prio = mtreq->low_prio;
++
++ /* We should allow proper increase of the snd/rcv-buffers. Thus, we
++ * use the original values instead of the bloated up ones from the
++ * clone.
++ */
++ child->sk_sndbuf = mpcb->orig_sk_sndbuf;
++ child->sk_rcvbuf = mpcb->orig_sk_rcvbuf;
++
++ child_tp->mptcp->slave_sk = 1;
++ child_tp->mptcp->snt_isn = tcp_rsk(req)->snt_isn;
++ child_tp->mptcp->rcv_isn = tcp_rsk(req)->rcv_isn;
++ child_tp->mptcp->init_rcv_wnd = req->rcv_wnd;
++
++ child_tp->tsq_flags = 0;
++
++ /* Subflows do not use the accept queue, as they
++ * are attached immediately to the mpcb.
++ */
++ inet_csk_reqsk_queue_drop(meta_sk, req, prev);
++ return child;
++
++teardown:
++ /* Drop this request - sock creation failed. */
++ inet_csk_reqsk_queue_drop(meta_sk, req, prev);
++ inet_csk_prepare_forced_close(child);
++ tcp_done(child);
++ return meta_sk;
++}
++
++int mptcp_time_wait(struct sock *sk, struct tcp_timewait_sock *tw)
++{
++ struct mptcp_tw *mptw;
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++
++ /* Alloc MPTCP-tw-sock */
++ mptw = kmem_cache_alloc(mptcp_tw_cache, GFP_ATOMIC);
++ if (!mptw)
++ return -ENOBUFS;
++
++ atomic_inc(&mpcb->mpcb_refcnt);
++
++ tw->mptcp_tw = mptw;
++ mptw->loc_key = mpcb->mptcp_loc_key;
++ mptw->meta_tw = mpcb->in_time_wait;
++ if (mptw->meta_tw) {
++ mptw->rcv_nxt = mptcp_get_rcv_nxt_64(mptcp_meta_tp(tp));
++ if (mpcb->mptw_state != TCP_TIME_WAIT)
++ mptw->rcv_nxt++;
++ }
++ rcu_assign_pointer(mptw->mpcb, mpcb);
++
++ spin_lock(&mpcb->tw_lock);
++ list_add_rcu(&mptw->list, &tp->mpcb->tw_list);
++ mptw->in_list = 1;
++ spin_unlock(&mpcb->tw_lock);
++
++ return 0;
++}
++
++void mptcp_twsk_destructor(struct tcp_timewait_sock *tw)
++{
++ struct mptcp_cb *mpcb;
++
++ rcu_read_lock();
++ mpcb = rcu_dereference(tw->mptcp_tw->mpcb);
++
++	/* If we are still holding a ref to the mpcb, we have to remove ourselves
++ * from the list and drop the ref properly.
++ */
++ if (mpcb && atomic_inc_not_zero(&mpcb->mpcb_refcnt)) {
++ spin_lock(&mpcb->tw_lock);
++ if (tw->mptcp_tw->in_list) {
++ list_del_rcu(&tw->mptcp_tw->list);
++ tw->mptcp_tw->in_list = 0;
++ }
++ spin_unlock(&mpcb->tw_lock);
++
++ /* Twice, because we increased it above */
++ mptcp_mpcb_put(mpcb);
++ mptcp_mpcb_put(mpcb);
++ }
++
++ rcu_read_unlock();
++
++ kmem_cache_free(mptcp_tw_cache, tw->mptcp_tw);
++}
++
++/* Updates the rcv_nxt of the time-wait-socks and allows them to ack a
++ * data-fin.
++ */
++void mptcp_update_tw_socks(const struct tcp_sock *tp, int state)
++{
++ struct mptcp_tw *mptw;
++
++ /* Used for sockets that go into tw after the meta
++ * (see mptcp_time_wait())
++ */
++ tp->mpcb->in_time_wait = 1;
++ tp->mpcb->mptw_state = state;
++
++ /* Update the time-wait-sock's information */
++ rcu_read_lock_bh();
++ list_for_each_entry_rcu(mptw, &tp->mpcb->tw_list, list) {
++ mptw->meta_tw = 1;
++ mptw->rcv_nxt = mptcp_get_rcv_nxt_64(tp);
++
++		/* We want to ack a DATA_FIN, but are still in FIN_WAIT_2 -
++		 * pretend as if the DATA_FIN has already reached us; that way
++		 * the checks in tcp_timewait_state_process will pass when the
++		 * DATA_FIN comes in.
++ */
++ if (state != TCP_TIME_WAIT)
++ mptw->rcv_nxt++;
++ }
++ rcu_read_unlock_bh();
++}
++
++void mptcp_tsq_flags(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++
++ /* It will be handled as a regular deferred-call */
++ if (is_meta_sk(sk))
++ return;
++
++ if (list_empty(&tp->mptcp->cb_list)) {
++ list_add(&tp->mptcp->cb_list, &tp->mpcb->callback_list);
++ /* We need to hold it here, as the sock_hold is not assured
++ * by the release_sock as it is done in regular TCP.
++ *
++ * The subsocket may get inet_csk_destroy'd while it is inside
++ * the callback_list.
++ */
++ sock_hold(sk);
++ }
++
++ if (!test_and_set_bit(MPTCP_SUB_DEFERRED, &tcp_sk(meta_sk)->tsq_flags))
++ sock_hold(meta_sk);
++}
++
++void mptcp_tsq_sub_deferred(struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_tcp_sock *mptcp, *tmp;
++
++ BUG_ON(!is_meta_sk(meta_sk) && !meta_tp->was_meta_sk);
++
++ __sock_put(meta_sk);
++ list_for_each_entry_safe(mptcp, tmp, &meta_tp->mpcb->callback_list, cb_list) {
++ struct tcp_sock *tp = mptcp->tp;
++ struct sock *sk = (struct sock *)tp;
++
++ list_del_init(&mptcp->cb_list);
++ sk->sk_prot->release_cb(sk);
++		/* Final sock_put (cf. mptcp_tsq_flags()) */
++ sock_put(sk);
++ }
++}
++
++struct workqueue_struct *mptcp_wq;
++EXPORT_SYMBOL(mptcp_wq);
++
++/* Output /proc/net/mptcp */
++static int mptcp_pm_seq_show(struct seq_file *seq, void *v)
++{
++ struct tcp_sock *meta_tp;
++ struct net *net = seq->private;
++ int i, n = 0;
++
++ seq_printf(seq, " sl loc_tok rem_tok v6 "
++ "local_address "
++ "remote_address "
++ "st ns tx_queue rx_queue inode");
++ seq_putc(seq, '\n');
++
++ for (i = 0; i < MPTCP_HASH_SIZE; i++) {
++ struct hlist_nulls_node *node;
++ rcu_read_lock_bh();
++ hlist_nulls_for_each_entry_rcu(meta_tp, node,
++ &tk_hashtable[i], tk_table) {
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sock *meta_sk = (struct sock *)meta_tp;
++ struct inet_sock *isk = inet_sk(meta_sk);
++
++ if (!meta_tp->mpc || !net_eq(net, sock_net(meta_sk)))
++ continue;
++
++ seq_printf(seq, "%4d: %04X %04X ", n++,
++ mpcb->mptcp_loc_token,
++ mpcb->mptcp_rem_token);
++ if (meta_sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(meta_sk)) {
++ seq_printf(seq, " 0 %08X:%04X %08X:%04X ",
++ isk->inet_saddr,
++ ntohs(isk->inet_sport),
++ isk->inet_daddr,
++ ntohs(isk->inet_dport));
++#if IS_ENABLED(CONFIG_IPV6)
++ } else if (meta_sk->sk_family == AF_INET6) {
++ struct in6_addr *src = &isk->pinet6->saddr;
++ struct in6_addr *dst = &meta_sk->sk_v6_daddr;
++ seq_printf(seq, " 1 %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X",
++ src->s6_addr32[0], src->s6_addr32[1],
++ src->s6_addr32[2], src->s6_addr32[3],
++ ntohs(isk->inet_sport),
++ dst->s6_addr32[0], dst->s6_addr32[1],
++ dst->s6_addr32[2], dst->s6_addr32[3],
++ ntohs(isk->inet_dport));
++#endif
++ }
++ seq_printf(seq, " %02X %02X %08X:%08X %lu",
++ meta_sk->sk_state, mpcb->cnt_subflows,
++ meta_tp->write_seq - meta_tp->snd_una,
++ max_t(int, meta_tp->rcv_nxt -
++ meta_tp->copied_seq, 0),
++ sock_i_ino(meta_sk));
++ seq_putc(seq, '\n');
++ }
++ rcu_read_unlock_bh();
++ }
++
++ return 0;
++}
++
++static int mptcp_pm_seq_open(struct inode *inode, struct file *file)
++{
++ return single_open_net(inode, file, mptcp_pm_seq_show);
++}
++
++static const struct file_operations mptcp_pm_seq_fops = {
++ .owner = THIS_MODULE,
++ .open = mptcp_pm_seq_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release_net,
++};
++
++static int mptcp_pm_init_net(struct net *net)
++{
++ if (!proc_create("mptcp", S_IRUGO, net->proc_net, &mptcp_pm_seq_fops))
++ return -ENOMEM;
++
++ return 0;
++}
++
++static void mptcp_pm_exit_net(struct net *net)
++{
++ remove_proc_entry("mptcp", net->proc_net);
++}
++
++static struct pernet_operations mptcp_pm_proc_ops = {
++ .init = mptcp_pm_init_net,
++ .exit = mptcp_pm_exit_net,
++};
++
++/* General initialization of mptcp */
++void __init mptcp_init(void)
++{
++ int i;
++ struct ctl_table_header *mptcp_sysctl;
++
++ mptcp_sock_cache = kmem_cache_create("mptcp_sock",
++ sizeof(struct mptcp_tcp_sock),
++ 0, SLAB_HWCACHE_ALIGN,
++ NULL);
++ if (!mptcp_sock_cache)
++ goto mptcp_sock_cache_failed;
++
++ mptcp_cb_cache = kmem_cache_create("mptcp_cb", sizeof(struct mptcp_cb),
++ 0, SLAB_DESTROY_BY_RCU|SLAB_HWCACHE_ALIGN,
++ NULL);
++ if (!mptcp_cb_cache)
++ goto mptcp_cb_cache_failed;
++
++ mptcp_tw_cache = kmem_cache_create("mptcp_tw", sizeof(struct mptcp_tw),
++ 0, SLAB_DESTROY_BY_RCU|SLAB_HWCACHE_ALIGN,
++ NULL);
++ if (!mptcp_tw_cache)
++ goto mptcp_tw_cache_failed;
++
++ get_random_bytes(mptcp_secret, sizeof(mptcp_secret));
++
++ mptcp_wq = alloc_workqueue("mptcp_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
++ if (!mptcp_wq)
++ goto alloc_workqueue_failed;
++
++ for (i = 0; i < MPTCP_HASH_SIZE; i++) {
++ INIT_HLIST_NULLS_HEAD(&tk_hashtable[i], i);
++ INIT_LIST_HEAD(&mptcp_reqsk_htb[i]);
++ INIT_HLIST_NULLS_HEAD(&mptcp_reqsk_tk_htb[i], i);
++ }
++
++ spin_lock_init(&mptcp_reqsk_hlock);
++ spin_lock_init(&mptcp_tk_hashlock);
++
++ if (register_pernet_subsys(&mptcp_pm_proc_ops))
++ goto pernet_failed;
++
++#if IS_ENABLED(CONFIG_IPV6)
++ if (mptcp_pm_v6_init())
++ goto mptcp_pm_v6_failed;
++#endif
++ if (mptcp_pm_v4_init())
++ goto mptcp_pm_v4_failed;
++
++ mptcp_sysctl = register_net_sysctl(&init_net, "net/mptcp", mptcp_table);
++ if (!mptcp_sysctl)
++ goto register_sysctl_failed;
++
++ if (mptcp_register_path_manager(&mptcp_pm_default))
++ goto register_pm_failed;
++
++ pr_info("MPTCP: Stable release v0.89.0-rc");
++
++ mptcp_init_failed = false;
++
++ return;
++
++register_pm_failed:
++ unregister_net_sysctl_table(mptcp_sysctl);
++register_sysctl_failed:
++ mptcp_pm_v4_undo();
++mptcp_pm_v4_failed:
++#if IS_ENABLED(CONFIG_IPV6)
++ mptcp_pm_v6_undo();
++mptcp_pm_v6_failed:
++#endif
++ unregister_pernet_subsys(&mptcp_pm_proc_ops);
++pernet_failed:
++ destroy_workqueue(mptcp_wq);
++alloc_workqueue_failed:
++ kmem_cache_destroy(mptcp_tw_cache);
++mptcp_tw_cache_failed:
++ kmem_cache_destroy(mptcp_cb_cache);
++mptcp_cb_cache_failed:
++ kmem_cache_destroy(mptcp_sock_cache);
++mptcp_sock_cache_failed:
++ mptcp_init_failed = true;
++}
+diff --git a/net/mptcp/mptcp_fullmesh.c b/net/mptcp/mptcp_fullmesh.c
+new file mode 100644
+index 0000000..49bddf3
+--- /dev/null
++++ b/net/mptcp/mptcp_fullmesh.c
+@@ -0,0 +1,1313 @@
++#include <linux/module.h>
++
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++
++#if IS_ENABLED(CONFIG_IPV6)
++#include <net/mptcp_v6.h>
++#include <net/addrconf.h>
++#endif
++
++enum {
++ MPTCP_EVENT_ADD = 1,
++ MPTCP_EVENT_DEL,
++ MPTCP_EVENT_MOD,
++};
++
++struct mptcp_loc_addr {
++ struct mptcp_loc4 locaddr4[MPTCP_MAX_ADDR];
++ u8 loc4_bits;
++ u8 next_v4_index;
++
++ struct mptcp_loc6 locaddr6[MPTCP_MAX_ADDR];
++ u8 loc6_bits;
++ u8 next_v6_index;
++};
++
++struct mptcp_addr_event {
++ struct list_head list;
++ unsigned short family;
++ u8 code:7,
++ low_prio:1;
++ union inet_addr addr;
++};
++
++struct fullmesh_priv {
++ /* Worker struct for subflow establishment */
++ struct work_struct subflow_work;
++ /* Delayed worker, when the routing-tables are not yet ready. */
++ struct delayed_work subflow_retry_work;
++
++ struct mptcp_cb *mpcb;
++
++ u16 remove_addrs; /* Addresses to remove */
++ u8 announced_addrs_v4; /* IPv4 Addresses we did announce */
++ u8 announced_addrs_v6; /* IPv6 Addresses we did announce */
++
++ u8 add_addr; /* Are we sending an add_addr? */
++};
++
++struct mptcp_fm_ns {
++ struct mptcp_loc_addr __rcu *local;
++ spinlock_t local_lock; /* Protecting the above pointer */
++ struct list_head events;
++ struct delayed_work address_worker;
++
++ struct net *net;
++};
++
++static struct mptcp_pm_ops full_mesh __read_mostly;
++
++static struct mptcp_fm_ns *fm_get_ns(struct net *net)
++{
++ return (struct mptcp_fm_ns *)net->mptcp.path_managers[MPTCP_PM_FULLMESH];
++}
++
++static void full_mesh_create_subflows(struct sock *meta_sk);
++
++static void retry_subflow_worker(struct work_struct *work)
++{
++ struct delayed_work *delayed_work = container_of(work,
++ struct delayed_work,
++ work);
++ struct fullmesh_priv *pm_priv = container_of(delayed_work,
++ struct fullmesh_priv,
++ subflow_retry_work);
++ struct mptcp_cb *mpcb = pm_priv->mpcb;
++ struct sock *meta_sk = mpcb->meta_sk;
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(sock_net(meta_sk));
++ int iter = 0, i;
++
++ /* We need a local (stable) copy of the address-list. Really, it is not
++ * such a big deal if the address-list is not 100% up-to-date.
++ */
++ rcu_read_lock_bh();
++ mptcp_local = rcu_dereference_bh(fm_ns->local);
++ mptcp_local = kmemdup(mptcp_local, sizeof(*mptcp_local), GFP_ATOMIC);
++ rcu_read_unlock_bh();
++
++ if (!mptcp_local)
++ return;
++
++next_subflow:
++ if (iter) {
++ release_sock(meta_sk);
++ mutex_unlock(&mpcb->mpcb_mutex);
++
++ yield();
++ }
++ mutex_lock(&mpcb->mpcb_mutex);
++ lock_sock_nested(meta_sk, SINGLE_DEPTH_NESTING);
++
++ iter++;
++
++ if (sock_flag(meta_sk, SOCK_DEAD))
++ goto exit;
++
++ mptcp_for_each_bit_set(mpcb->rem4_bits, i) {
++ struct mptcp_rem4 *rem = &mpcb->remaddr4[i];
++ /* Do we need to retry establishing a subflow ? */
++ if (rem->retry_bitfield) {
++ int i = mptcp_find_free_index(~rem->retry_bitfield);
++
++ rem->bitfield |= (1 << i);
++ rem->retry_bitfield &= ~(1 << i);
++
++ mptcp_init4_subsockets(meta_sk, &mptcp_local->locaddr4[i], rem);
++ goto next_subflow;
++ }
++ }
++
++#if IS_ENABLED(CONFIG_IPV6)
++ mptcp_for_each_bit_set(mpcb->rem6_bits, i) {
++ struct mptcp_rem6 *rem = &mpcb->remaddr6[i];
++
++ /* Do we need to retry establishing a subflow ? */
++ if (rem->retry_bitfield) {
++ int i = mptcp_find_free_index(~rem->retry_bitfield);
++
++ rem->bitfield |= (1 << i);
++ rem->retry_bitfield &= ~(1 << i);
++
++ mptcp_init6_subsockets(meta_sk, &mptcp_local->locaddr6[i], rem);
++ goto next_subflow;
++ }
++ }
++#endif
++
++exit:
++ kfree(mptcp_local);
++ release_sock(meta_sk);
++ mutex_unlock(&mpcb->mpcb_mutex);
++ sock_put(meta_sk);
++}
++
++/**
++ * Create all new subflows, by doing calls to mptcp_initX_subsockets
++ *
++ * This function uses a goto next_subflow, to allow releasing the lock between
++ * new subflows and giving other processes a chance to do some work on the
++ * socket and potentially finishing the communication.
++ **/
++static void create_subflow_worker(struct work_struct *work)
++{
++ struct fullmesh_priv *pm_priv = container_of(work,
++ struct fullmesh_priv,
++ subflow_work);
++ struct mptcp_cb *mpcb = pm_priv->mpcb;
++ struct sock *meta_sk = mpcb->meta_sk;
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(sock_net(meta_sk));
++ int iter = 0, retry = 0;
++ int i;
++
++ /* We need a local (stable) copy of the address-list. Really, it is not
++ * such a big deal if the address-list is not 100% up-to-date.
++ */
++ rcu_read_lock_bh();
++ mptcp_local = rcu_dereference_bh(fm_ns->local);
++ mptcp_local = kmemdup(mptcp_local, sizeof(*mptcp_local), GFP_ATOMIC);
++ rcu_read_unlock_bh();
++
++ if (!mptcp_local)
++ return;
++
++next_subflow:
++ if (iter) {
++ release_sock(meta_sk);
++ mutex_unlock(&mpcb->mpcb_mutex);
++
++ yield();
++ }
++ mutex_lock(&mpcb->mpcb_mutex);
++ lock_sock_nested(meta_sk, SINGLE_DEPTH_NESTING);
++
++ iter++;
++
++ if (sock_flag(meta_sk, SOCK_DEAD))
++ goto exit;
++
++ if (mpcb->master_sk &&
++ !tcp_sk(mpcb->master_sk)->mptcp->fully_established)
++ goto exit;
++
++ mptcp_for_each_bit_set(mpcb->rem4_bits, i) {
++ struct mptcp_rem4 *rem;
++ u8 remaining_bits;
++
++ rem = &mpcb->remaddr4[i];
++ remaining_bits = ~(rem->bitfield) & mptcp_local->loc4_bits;
++
++ /* Are there still combinations to handle? */
++ if (remaining_bits) {
++ int i = mptcp_find_free_index(~remaining_bits);
++
++ rem->bitfield |= (1 << i);
++
++ /* If a route is not yet available then retry once */
++ if (mptcp_init4_subsockets(meta_sk, &mptcp_local->locaddr4[i],
++ rem) == -ENETUNREACH)
++ retry = rem->retry_bitfield |= (1 << i);
++ goto next_subflow;
++ }
++ }
++
++#if IS_ENABLED(CONFIG_IPV6)
++ mptcp_for_each_bit_set(mpcb->rem6_bits, i) {
++ struct mptcp_rem6 *rem;
++ u8 remaining_bits;
++
++ rem = &mpcb->remaddr6[i];
++ remaining_bits = ~(rem->bitfield) & mptcp_local->loc6_bits;
++
++ /* Are there still combinations to handle? */
++ if (remaining_bits) {
++ int i = mptcp_find_free_index(~remaining_bits);
++
++ rem->bitfield |= (1 << i);
++
++ /* If a route is not yet available then retry once */
++ if (mptcp_init6_subsockets(meta_sk, &mptcp_local->locaddr6[i],
++ rem) == -ENETUNREACH)
++ retry = rem->retry_bitfield |= (1 << i);
++ goto next_subflow;
++ }
++ }
++#endif
++
++ if (retry && !delayed_work_pending(&pm_priv->subflow_retry_work)) {
++ sock_hold(meta_sk);
++ queue_delayed_work(mptcp_wq, &pm_priv->subflow_retry_work,
++ msecs_to_jiffies(MPTCP_SUBFLOW_RETRY_DELAY));
++ }
++
++exit:
++ kfree(mptcp_local);
++ release_sock(meta_sk);
++ mutex_unlock(&mpcb->mpcb_mutex);
++ sock_put(meta_sk);
++}
++
++static void update_remove_addrs(u8 addr_id, struct sock *meta_sk,
++ struct mptcp_loc_addr *mptcp_local)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct fullmesh_priv *fmp = (struct fullmesh_priv *)&mpcb->mptcp_pm[0];
++ struct sock *sk;
++ int i;
++
++ fmp->remove_addrs |= (1 << addr_id);
++ /* v4 goes from 0 to MPTCP_MAX_ADDR, v6 beyond */
++ if (addr_id < MPTCP_MAX_ADDR) {
++ fmp->announced_addrs_v4 &= ~(1 << addr_id);
++
++ mptcp_for_each_bit_set(mpcb->rem4_bits, i) {
++ mpcb->remaddr4[i].bitfield &= mptcp_local->loc4_bits;
++ mpcb->remaddr4[i].retry_bitfield &= mptcp_local->loc4_bits;
++ }
++ } else {
++ fmp->announced_addrs_v6 &= ~(1 << (addr_id - MPTCP_MAX_ADDR));
++
++ mptcp_for_each_bit_set(mpcb->rem6_bits, i) {
++ mpcb->remaddr6[i].bitfield &= mptcp_local->loc6_bits;
++ mpcb->remaddr6[i].retry_bitfield &= mptcp_local->loc6_bits;
++ }
++ }
++
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++ if (sk)
++ tcp_send_ack(sk);
++}
++
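++/* Return the index of the given address in the local address-list, or -1 if
++ * it is not present.
++ */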
++static int mptcp_find_address(struct mptcp_loc_addr *mptcp_local,
++ sa_family_t family, union inet_addr *addr)
++{
++ int i;
++ u8 loc_bits;
++ bool found = false;
++
++ if (family == AF_INET)
++ loc_bits = mptcp_local->loc4_bits;
++ else
++ loc_bits = mptcp_local->loc6_bits;
++
++ mptcp_for_each_bit_set(loc_bits, i) {
++ if (family == AF_INET &&
++ mptcp_local->locaddr4[i].addr.s_addr == addr->in.s_addr) {
++ found = true;
++ break;
++ }
++ if (family == AF_INET6 &&
++ ipv6_addr_equal(&mptcp_local->locaddr6[i].addr,
++ &addr->in6)) {
++ found = true;
++ break;
++ }
++ }
++
++ if (!found)
++ return -1;
++
++ return i;
++}
++
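++/* Deferred worker: dequeue the pending address-events one by one, update the
++ * per-namespace address-list and apply each event to all MPTCP-sockets of
++ * this namespace.
++ */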
++static void mptcp_address_worker(struct work_struct *work)
++{
++ struct delayed_work *delayed_work = container_of(work,
++ struct delayed_work,
++ work);
++ struct mptcp_fm_ns *fm_ns = container_of(delayed_work,
++ struct mptcp_fm_ns,
++ address_worker);
++ struct net *net = fm_ns->net;
++ struct mptcp_addr_event *event = NULL;
++ struct mptcp_loc_addr *mptcp_local, *old;
++ int i, id = -1; /* id is used in the socket-code on a delete-event */
++ bool success; /* Used to indicate if we succeeded handling the event */
++
++next_event:
++ success = false;
++ kfree(event);
++
++ /* First, let's dequeue an event from our event-list */
++ rcu_read_lock_bh();
++ spin_lock(&fm_ns->local_lock);
++
++ event = list_first_entry_or_null(&fm_ns->events,
++ struct mptcp_addr_event, list);
++ if (!event) {
++ spin_unlock(&fm_ns->local_lock);
++ rcu_read_unlock_bh();
++ return;
++ }
++
++ list_del(&event->list);
++
++ mptcp_local = rcu_dereference_bh(fm_ns->local);
++
++ if (event->code == MPTCP_EVENT_DEL) {
++ id = mptcp_find_address(mptcp_local, event->family, &event->addr);
++
++ /* Not in the list - so we don't care */
++ if (id < 0)
++ goto duno;
++
++ old = mptcp_local;
++ mptcp_local = kmemdup(mptcp_local, sizeof(*mptcp_local),
++ GFP_ATOMIC);
++ if (!mptcp_local)
++ goto duno;
++
++ if (event->family == AF_INET)
++ mptcp_local->loc4_bits &= ~(1 << id);
++ else
++ mptcp_local->loc6_bits &= ~(1 << id);
++
++ rcu_assign_pointer(fm_ns->local, mptcp_local);
++ kfree(old);
++ } else {
++ int i = mptcp_find_address(mptcp_local, event->family, &event->addr);
++ int j = i;
++
++ if (j < 0) {
++ /* Not in the list, so we have to find an empty slot */
++ if (event->family == AF_INET)
++ i = __mptcp_find_free_index(mptcp_local->loc4_bits, -1,
++ mptcp_local->next_v4_index);
++ if (event->family == AF_INET6)
++ i = __mptcp_find_free_index(mptcp_local->loc6_bits, -1,
++ mptcp_local->next_v6_index);
++
++ if (i < 0) {
++ mptcp_debug("%s no more space\n", __func__);
++ goto duno;
++ }
++
++ /* It might have been a MOD-event. */
++ event->code = MPTCP_EVENT_ADD;
++ } else {
++ /* Let's check if anything changes */
++ if (event->family == AF_INET &&
++ event->low_prio == mptcp_local->locaddr4[i].low_prio)
++ goto duno;
++
++ if (event->family == AF_INET6 &&
++ event->low_prio == mptcp_local->locaddr6[i].low_prio)
++ goto duno;
++ }
++
++ old = mptcp_local;
++ mptcp_local = kmemdup(mptcp_local, sizeof(*mptcp_local),
++ GFP_ATOMIC);
++ if (!mptcp_local)
++ goto duno;
++
++ if (event->family == AF_INET) {
++ mptcp_local->locaddr4[i].addr.s_addr = event->addr.in.s_addr;
++ mptcp_local->locaddr4[i].loc4_id = i + 1;
++ mptcp_local->locaddr4[i].low_prio = event->low_prio;
++ } else {
++ mptcp_local->locaddr6[i].addr = event->addr.in6;
++ mptcp_local->locaddr6[i].loc6_id = i + MPTCP_MAX_ADDR;
++ mptcp_local->locaddr6[i].low_prio = event->low_prio;
++ }
++
++ if (j < 0) {
++ if (event->family == AF_INET) {
++ mptcp_local->loc4_bits |= (1 << i);
++ mptcp_local->next_v4_index = i + 1;
++ } else {
++ mptcp_local->loc6_bits |= (1 << i);
++ mptcp_local->next_v6_index = i + 1;
++ }
++ }
++
++ rcu_assign_pointer(fm_ns->local, mptcp_local);
++ kfree(old);
++ }
++ success = true;
++
++duno:
++ spin_unlock(&fm_ns->local_lock);
++ rcu_read_unlock_bh();
++
++ if (!success)
++ goto next_event;
++
++ /* Now we iterate over the MPTCP-sockets and apply the event. */
++ for (i = 0; i < MPTCP_HASH_SIZE; i++) {
++ struct hlist_nulls_node *node;
++ struct tcp_sock *meta_tp;
++
++ rcu_read_lock_bh();
++ hlist_nulls_for_each_entry_rcu(meta_tp, node, &tk_hashtable[i],
++ tk_table) {
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sock *meta_sk = (struct sock *)meta_tp, *sk;
++ struct fullmesh_priv *fmp = (struct fullmesh_priv *)&mpcb->mptcp_pm[0];
++
++ if (sock_net(meta_sk) != net)
++ continue;
++
++ if (unlikely(!atomic_inc_not_zero(&meta_sk->sk_refcnt)))
++ continue;
++
++ bh_lock_sock(meta_sk);
++
++ if (!meta_tp->mpc || !is_meta_sk(meta_sk) ||
++ mpcb->infinite_mapping_snd ||
++ mpcb->infinite_mapping_rcv ||
++ mpcb->send_infinite_mapping)
++ goto next;
++
++ /* May be that the pm has changed in-between */
++ if (mpcb->pm_ops != &full_mesh)
++ goto next;
++
++ if (sock_owned_by_user(meta_sk)) {
++ if (!test_and_set_bit(MPTCP_PATH_MANAGER,
++ &meta_tp->tsq_flags))
++ sock_hold(meta_sk);
++
++ goto next;
++ }
++
++ if (event->code == MPTCP_EVENT_ADD) {
++ if (event->family == AF_INET)
++ fmp->add_addr++;
++#if IS_ENABLED(CONFIG_IPV6)
++ if (event->family == AF_INET6)
++ fmp->add_addr++;
++#endif
++
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++ if (sk)
++ tcp_send_ack(sk);
++
++ full_mesh_create_subflows(meta_sk);
++ }
++
++ if (event->code == MPTCP_EVENT_DEL) {
++ struct sock *sk, *tmpsk;
++ struct mptcp_loc_addr *mptcp_local;
++ bool found = false;
++
++ mptcp_local = rcu_dereference_bh(fm_ns->local);
++
++ /* Look for the socket and remove it */
++ mptcp_for_each_sk_safe(mpcb, sk, tmpsk) {
++ if ((event->family == AF_INET6 &&
++ (sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(sk))) ||
++ (event->family == AF_INET &&
++ (sk->sk_family == AF_INET6 &&
++ !mptcp_v6_is_v4_mapped(sk))))
++ continue;
++
++ if (event->family == AF_INET &&
++ (sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(sk)) &&
++ inet_sk(sk)->inet_saddr != event->addr.in.s_addr)
++ continue;
++
++ if (event->family == AF_INET6 &&
++ sk->sk_family == AF_INET6 &&
++ !ipv6_addr_equal(&inet6_sk(sk)->saddr, &event->addr.in6))
++ continue;
++
++ /* Reinject, so that pf = 1 and so we
++ * won't select this one as the
++ * ack-sock.
++ */
++ mptcp_reinject_data(sk, 0);
++
++ /* A master is special, it has
++ * address-id 0
++ */
++ if (!tcp_sk(sk)->mptcp->loc_id)
++ update_remove_addrs(0, meta_sk, mptcp_local);
++ else if (tcp_sk(sk)->mptcp->loc_id != id)
++ update_remove_addrs(tcp_sk(sk)->mptcp->loc_id, meta_sk, mptcp_local);
++
++ mptcp_sub_force_close(sk);
++ found = true;
++ }
++
++ if (!found)
++ goto next;
++
++ /* The id may have been given by the event,
++ * matching on a local address. And it may not
++ * have matched on one of the above sockets,
++ * because the client never created a subflow.
++ * So, we have to finally remove it here.
++ */
++ if (id > 0)
++ update_remove_addrs(id, meta_sk, mptcp_local);
++ }
++
++ if (event->code == MPTCP_EVENT_MOD) {
++ struct sock *sk;
++
++ mptcp_for_each_sk(mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++ if (event->family == AF_INET &&
++ (sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(sk)) &&
++ inet_sk(sk)->inet_saddr == event->addr.in.s_addr) {
++ if (event->low_prio != tp->mptcp->low_prio) {
++ tp->mptcp->send_mp_prio = 1;
++ tp->mptcp->low_prio = event->low_prio;
++
++ tcp_send_ack(sk);
++ }
++ }
++
++ if (event->family == AF_INET6 &&
++ sk->sk_family == AF_INET6 &&
++ !ipv6_addr_equal(&inet6_sk(sk)->saddr, &event->addr.in6)) {
++ if (event->low_prio != tp->mptcp->low_prio) {
++ tp->mptcp->send_mp_prio = 1;
++ tp->mptcp->low_prio = event->low_prio;
++
++ tcp_send_ack(sk);
++ }
++ }
++ }
++ }
++next:
++ bh_unlock_sock(meta_sk);
++ sock_put(meta_sk);
++ }
++ rcu_read_unlock_bh();
++ }
++ goto next_event;
++}
++
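++/* Look for an already queued event concerning the same address, so that the
++ * new event can be coalesced with it.
++ */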
++static struct mptcp_addr_event *lookup_similar_event(struct net *net,
++ struct mptcp_addr_event *event)
++{
++ struct mptcp_addr_event *eventq;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++
++ list_for_each_entry(eventq, &fm_ns->events, list) {
++ if (eventq->family != event->family)
++ continue;
++ if (event->family == AF_INET) {
++ if (eventq->addr.in.s_addr == event->addr.in.s_addr)
++ return eventq;
++ } else {
++ if (ipv6_addr_equal(&eventq->addr.in6, &event->addr.in6))
++ return eventq;
++ }
++ }
++ return NULL;
++}
++
++/* We already hold the net-namespace MPTCP-lock */
++static void add_pm_event(struct net *net, struct mptcp_addr_event *event)
++{
++ struct mptcp_addr_event *eventq = lookup_similar_event(net, event);
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++
++ if (eventq) {
++ switch (event->code) {
++ case MPTCP_EVENT_DEL:
++ list_del(&eventq->list);
++ kfree(eventq);
++ break;
++ case MPTCP_EVENT_ADD:
++ eventq->low_prio = event->low_prio;
++ eventq->code = MPTCP_EVENT_ADD;
++ return;
++ case MPTCP_EVENT_MOD:
++ eventq->low_prio = event->low_prio;
++ return;
++ }
++ }
++
++ /* OK, we have to add the new address to the wait queue */
++ eventq = kmemdup(event, sizeof(struct mptcp_addr_event), GFP_ATOMIC);
++ if (!eventq)
++ return;
++
++ list_add_tail(&eventq->list, &fm_ns->events);
++
++ /* Schedule the address-worker if it is not already pending */
++ if (!delayed_work_pending(&fm_ns->address_worker))
++ queue_delayed_work(mptcp_wq, &fm_ns->address_worker,
++ msecs_to_jiffies(500));
++}
++
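++/* Translate an IPv4 address-notification into an MPTCP address-event and
++ * queue it for the address-worker.
++ */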
++static void addr4_event_handler(struct in_ifaddr *ifa, unsigned long event,
++ struct net *net)
++{
++ struct net_device *netdev = ifa->ifa_dev->dev;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++ struct mptcp_addr_event mpevent;
++
++ if (ifa->ifa_scope > RT_SCOPE_LINK ||
++ ipv4_is_loopback(ifa->ifa_local))
++ return;
++
++ spin_lock_bh(&fm_ns->local_lock);
++
++ mpevent.family = AF_INET;
++ mpevent.addr.in.s_addr = ifa->ifa_local;
++ mpevent.low_prio = (netdev->flags & IFF_MPBACKUP) ? 1 : 0;
++
++ if (event == NETDEV_DOWN || !netif_running(netdev) ||
++ (netdev->flags & IFF_NOMULTIPATH))
++ mpevent.code = MPTCP_EVENT_DEL;
++ else if (event == NETDEV_UP)
++ mpevent.code = MPTCP_EVENT_ADD;
++ else if (event == NETDEV_CHANGE)
++ mpevent.code = MPTCP_EVENT_MOD;
++
++ add_pm_event(net, &mpevent);
++
++ spin_unlock_bh(&fm_ns->local_lock);
++ return;
++}
++
++/* React on IPv4-addr add/rem-events */
++static int mptcp_pm_inetaddr_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
++ struct net *net = dev_net(ifa->ifa_dev->dev);
++
++ addr4_event_handler(ifa, event, net);
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block mptcp_pm_inetaddr_notifier = {
++ .notifier_call = mptcp_pm_inetaddr_event,
++};
++
++#if IS_ENABLED(CONFIG_IPV6)
++
++/* IPV6-related address/interface watchers */
++struct mptcp_dad_data {
++ struct timer_list timer;
++ struct inet6_ifaddr *ifa;
++};
++
++static void dad_callback(unsigned long arg);
++static int inet6_addr_event(struct notifier_block *this,
++ unsigned long event, void *ptr);
++
++static int ipv6_is_in_dad_state(struct inet6_ifaddr *ifa)
++{
++ return ((ifa->flags & IFA_F_TENTATIVE) &&
++ ifa->state == INET6_IFADDR_STATE_DAD);
++}
++
++static void dad_init_timer(struct mptcp_dad_data *data,
++ struct inet6_ifaddr *ifa)
++{
++ data->ifa = ifa;
++ data->timer.data = (unsigned long)data;
++ data->timer.function = dad_callback;
++ if (ifa->idev->cnf.rtr_solicit_delay)
++ data->timer.expires = jiffies + ifa->idev->cnf.rtr_solicit_delay;
++ else
++ data->timer.expires = jiffies + (HZ/10);
++}
++
++static void dad_callback(unsigned long arg)
++{
++ struct mptcp_dad_data *data = (struct mptcp_dad_data *)arg;
++
++ if (ipv6_is_in_dad_state(data->ifa)) {
++ dad_init_timer(data, data->ifa);
++ add_timer(&data->timer);
++ } else {
++ inet6_addr_event(NULL, NETDEV_UP, data->ifa);
++ in6_ifa_put(data->ifa);
++ kfree(data);
++ }
++}
++
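++/* The address is still tentative (DAD in progress) - poll it with a timer
++ * and only report the event once DAD has completed.
++ */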
++static inline void dad_setup_timer(struct inet6_ifaddr *ifa)
++{
++ struct mptcp_dad_data *data;
++
++ data = kmalloc(sizeof(*data), GFP_ATOMIC);
++
++ if (!data)
++ return;
++
++ init_timer(&data->timer);
++ dad_init_timer(data, ifa);
++ add_timer(&data->timer);
++ in6_ifa_hold(ifa);
++}
++
++static void addr6_event_handler(struct inet6_ifaddr *ifa, unsigned long event,
++ struct net *net)
++{
++ struct net_device *netdev = ifa->idev->dev;
++ int addr_type = ipv6_addr_type(&ifa->addr);
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++ struct mptcp_addr_event mpevent;
++
++ if (ifa->scope > RT_SCOPE_LINK ||
++ addr_type == IPV6_ADDR_ANY ||
++ (addr_type & IPV6_ADDR_LOOPBACK) ||
++ (addr_type & IPV6_ADDR_LINKLOCAL))
++ return;
++
++ spin_lock_bh(&fm_ns->local_lock);
++
++ mpevent.family = AF_INET6;
++ mpevent.addr.in6 = ifa->addr;
++ mpevent.low_prio = (netdev->flags & IFF_MPBACKUP) ? 1 : 0;
++
++ if (event == NETDEV_DOWN || !netif_running(netdev) ||
++ (netdev->flags & IFF_NOMULTIPATH))
++ mpevent.code = MPTCP_EVENT_DEL;
++ else if (event == NETDEV_UP)
++ mpevent.code = MPTCP_EVENT_ADD;
++ else if (event == NETDEV_CHANGE)
++ mpevent.code = MPTCP_EVENT_MOD;
++
++ add_pm_event(net, &mpevent);
++
++ spin_unlock_bh(&fm_ns->local_lock);
++ return;
++}
++
++/* React on IPv6-addr add/rem-events */
++static int inet6_addr_event(struct notifier_block *this, unsigned long event,
++ void *ptr)
++{
++ struct inet6_ifaddr *ifa6 = (struct inet6_ifaddr *)ptr;
++ struct net *net = dev_net(ifa6->idev->dev);
++
++ if (ipv6_is_in_dad_state(ifa6))
++ dad_setup_timer(ifa6);
++ else
++ addr6_event_handler(ifa6, event, net);
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block inet6_addr_notifier = {
++ .notifier_call = inet6_addr_event,
++};
++
++#endif
++
++/* React on ifup/down-events */
++static int netdev_event(struct notifier_block *this, unsigned long event,
++ void *ptr)
++{
++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct in_device *in_dev;
++#if IS_ENABLED(CONFIG_IPV6)
++ struct inet6_dev *in6_dev;
++#endif
++
++ if (!(event == NETDEV_UP || event == NETDEV_DOWN ||
++ event == NETDEV_CHANGE))
++ return NOTIFY_DONE;
++
++ rcu_read_lock();
++ in_dev = __in_dev_get_rtnl(dev);
++
++ if (in_dev) {
++ for_ifa(in_dev) {
++ mptcp_pm_inetaddr_event(NULL, event, ifa);
++ } endfor_ifa(in_dev);
++ }
++
++#if IS_ENABLED(CONFIG_IPV6)
++ in6_dev = __in6_dev_get(dev);
++
++ if (in6_dev) {
++ struct inet6_ifaddr *ifa6;
++ list_for_each_entry(ifa6, &in6_dev->addr_list, if_list)
++ inet6_addr_event(NULL, event, ifa6);
++ }
++#endif
++
++ rcu_read_unlock();
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block mptcp_pm_netdev_notifier = {
++ .notifier_call = netdev_event,
++};
++
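++/* A new MPTCP-session has been established: set up the subflow-workers and
++ * announce our additional local addresses to the peer.
++ */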
++static void full_mesh_new_session(struct sock *meta_sk, int index)
++{
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct fullmesh_priv *fmp = (struct fullmesh_priv *)&mpcb->mptcp_pm[0];
++ struct net *net = sock_net(meta_sk);
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++ struct sock *sk;
++ int i;
++
++ if (index == -1) {
++ mptcp_fallback_default(mpcb);
++ return;
++ }
++
++ /* Initialize workqueue-struct */
++ INIT_WORK(&fmp->subflow_work, create_subflow_worker);
++ INIT_DELAYED_WORK(&fmp->subflow_retry_work, retry_subflow_worker);
++ fmp->mpcb = mpcb;
++
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++
++ rcu_read_lock();
++ mptcp_local = rcu_dereference(fm_ns->local);
++
++ /* Look for the address among the local addresses */
++ mptcp_for_each_bit_set(mptcp_local->loc4_bits, i) {
++ __be32 ifa_address = mptcp_local->locaddr4[i].addr.s_addr;
++
++ /* We do not need to announce the initial subflow's address again */
++ if ((meta_sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(meta_sk)) &&
++ inet_sk(meta_sk)->inet_saddr == ifa_address)
++ continue;
++
++ fmp->add_addr++;
++
++ if (sk)
++ tcp_send_ack(sk);
++ }
++
++#if IS_ENABLED(CONFIG_IPV6)
++ mptcp_for_each_bit_set(mptcp_local->loc6_bits, i) {
++ struct in6_addr *ifa6 = &mptcp_local->locaddr6[i].addr;
++
++ /* We do not need to announce the initial subflow's address again */
++ if (meta_sk->sk_family == AF_INET6 &&
++ ipv6_addr_equal(&inet6_sk(meta_sk)->saddr, ifa6))
++ continue;
++
++ fmp->add_addr++;
++
++ if (sk)
++ tcp_send_ack(sk);
++ }
++#endif
++
++ rcu_read_unlock();
++
++ if (meta_sk->sk_family == AF_INET || mptcp_v6_is_v4_mapped(meta_sk))
++ fmp->announced_addrs_v4 |= (1 << index);
++ else
++ fmp->announced_addrs_v6 |= (1 << index);
++}
++
++static void full_mesh_create_subflows(struct sock *meta_sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct fullmesh_priv *pm_priv = (struct fullmesh_priv *)&mpcb->mptcp_pm[0];
++
++ if (mpcb->infinite_mapping_snd || mpcb->infinite_mapping_rcv ||
++ mpcb->send_infinite_mapping ||
++ mpcb->server_side || sock_flag(meta_sk, SOCK_DEAD))
++ return;
++
++ /* The master may not yet be fully established (address added through
++ * mptcp_update_metasocket). Then, we should not attempt to create new
++ * subflows.
++ */
++ if (mpcb->master_sk &&
++ !tcp_sk(mpcb->master_sk)->mptcp->fully_established)
++ return;
++
++ if (!work_pending(&pm_priv->subflow_work)) {
++ sock_hold(meta_sk);
++ queue_work(mptcp_wq, &pm_priv->subflow_work);
++ }
++}
++
++/* Called upon release_sock, if the socket was owned by the user during
++ * a path-management event.
++ */
++static void full_mesh_release_sock(struct sock *meta_sk)
++{
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct fullmesh_priv *fmp = (struct fullmesh_priv *)&mpcb->mptcp_pm[0];
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(sock_net(meta_sk));
++ struct sock *sk, *tmpsk;
++ int i;
++
++ rcu_read_lock();
++ mptcp_local = rcu_dereference(fm_ns->local);
++
++ /* First, detect modifications or additions */
++ mptcp_for_each_bit_set(mptcp_local->loc4_bits, i) {
++ struct in_addr ifa = mptcp_local->locaddr4[i].addr;
++ bool found = false;
++
++ mptcp_for_each_sk(mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (sk->sk_family == AF_INET6 &&
++ !mptcp_v6_is_v4_mapped(sk))
++ continue;
++
++ if (inet_sk(sk)->inet_saddr != ifa.s_addr)
++ continue;
++
++ found = true;
++
++ if (mptcp_local->locaddr4[i].low_prio != tp->mptcp->low_prio) {
++ tp->mptcp->send_mp_prio = 1;
++ tp->mptcp->low_prio = mptcp_local->locaddr4[i].low_prio;
++
++ tcp_send_ack(sk);
++ }
++ }
++
++ if (!found) {
++ fmp->add_addr++;
++
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++ if (sk)
++ tcp_send_ack(sk);
++ full_mesh_create_subflows(meta_sk);
++ }
++ }
++
++#if IS_ENABLED(CONFIG_IPV6)
++ mptcp_for_each_bit_set(mptcp_local->loc6_bits, i) {
++ struct in6_addr ifa = mptcp_local->locaddr6[i].addr;
++ bool found = false;
++
++ mptcp_for_each_sk(mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(sk))
++ continue;
++
++ if (!ipv6_addr_equal(&inet6_sk(sk)->saddr, &ifa))
++ continue;
++
++ found = true;
++
++ if (mptcp_local->locaddr6[i].low_prio != tp->mptcp->low_prio) {
++ tp->mptcp->send_mp_prio = 1;
++ tp->mptcp->low_prio = mptcp_local->locaddr6[i].low_prio;
++
++ tcp_send_ack(sk);
++ }
++ }
++
++ if (!found) {
++ fmp->add_addr++;
++
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++ if (sk)
++ tcp_send_ack(sk);
++ full_mesh_create_subflows(meta_sk);
++ }
++ }
++#endif
++
++ /* Now, detect address-removals */
++ mptcp_for_each_sk_safe(mpcb, sk, tmpsk) {
++ bool shall_remove = true;
++
++ if (sk->sk_family == AF_INET || mptcp_v6_is_v4_mapped(sk)) {
++ mptcp_for_each_bit_set(mptcp_local->loc4_bits, i) {
++ if (inet_sk(sk)->inet_saddr == mptcp_local->locaddr4[i].addr.s_addr) {
++ shall_remove = false;
++ break;
++ }
++ }
++ } else {
++ mptcp_for_each_bit_set(mptcp_local->loc6_bits, i) {
++ if (ipv6_addr_equal(&inet6_sk(sk)->saddr, &mptcp_local->locaddr6[i].addr)) {
++ shall_remove = false;
++ break;
++ }
++ }
++ }
++
++ if (shall_remove) {
++ /* Reinject, so that pf = 1 and so we
++ * won't select this one as the
++ * ack-sock.
++ */
++ mptcp_reinject_data(sk, 0);
++
++ update_remove_addrs(tcp_sk(sk)->mptcp->loc_id, meta_sk,
++ mptcp_local);
++
++ if (mpcb->master_sk == sk)
++ update_remove_addrs(0, meta_sk, mptcp_local);
++
++ mptcp_sub_force_close(sk);
++ }
++ }
++ rcu_read_unlock();
++}
++
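++/* Map a local address to its index in the per-namespace address-list. */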
++static int full_mesh_get_local_index(sa_family_t family, union inet_addr *addr,
++ struct net *net)
++{
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++ int index;
++
++ /* Handle the backup-flows */
++ rcu_read_lock();
++ mptcp_local = rcu_dereference(fm_ns->local);
++
++ index = mptcp_find_address(mptcp_local, family, addr);
++
++ rcu_read_unlock();
++
++ return index;
++}
++
++static int full_mesh_get_local_id(sa_family_t family, union inet_addr *addr,
++ struct net *net)
++{
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(net);
++ int index, id = -1;
++
++ /* Handle the backup-flows */
++ rcu_read_lock();
++ mptcp_local = rcu_dereference(fm_ns->local);
++
++ index = mptcp_find_address(mptcp_local, family, addr);
++
++ if (index != -1) {
++ if (family == AF_INET)
++ id = mptcp_local->locaddr4[index].loc4_id;
++ else
++ id = mptcp_local->locaddr6[index].loc6_id;
++ }
++
++ rcu_read_unlock();
++
++ return id;
++}
++
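++/* Append ADD_ADDR/REMOVE_ADDR options to an outgoing segment for local
++ * addresses that still have to be announced or removed.
++ */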
++static void full_mesh_addr_signal(struct sock *sk, unsigned *size,
++ struct tcp_out_options *opts,
++ struct sk_buff *skb)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct fullmesh_priv *fmp = (struct fullmesh_priv *)&mpcb->mptcp_pm[0];
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_fm_ns *fm_ns = fm_get_ns(sock_net(sk));
++ int remove_addr_len;
++ u8 unannouncedv4, unannouncedv6;
++
++ if (likely(!fmp->add_addr))
++ goto remove_addr;
++
++ rcu_read_lock();
++ mptcp_local = rcu_dereference(fm_ns->local);
++
++ /* IPv4 */
++ unannouncedv4 = (~fmp->announced_addrs_v4) & mptcp_local->loc4_bits;
++ if (unannouncedv4 &&
++ MAX_TCP_OPTION_SPACE - *size >= MPTCP_SUB_LEN_ADD_ADDR4_ALIGN) {
++ int ind = mptcp_find_free_index(~unannouncedv4);
++
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_ADD_ADDR;
++ opts->add_addr4.addr_id = mptcp_local->locaddr4[ind].loc4_id;
++ opts->add_addr4.addr = mptcp_local->locaddr4[ind].addr;
++ opts->add_addr_v4 = 1;
++
++ if (skb) {
++ fmp->announced_addrs_v4 |= (1 << ind);
++ fmp->add_addr--;
++ }
++ *size += MPTCP_SUB_LEN_ADD_ADDR4_ALIGN;
++ }
++
++ /* IPv6 */
++ unannouncedv6 = (~fmp->announced_addrs_v6) & mptcp_local->loc6_bits;
++ if (unannouncedv6 &&
++ MAX_TCP_OPTION_SPACE - *size >= MPTCP_SUB_LEN_ADD_ADDR6_ALIGN) {
++ int ind = mptcp_find_free_index(~unannouncedv6);
++
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_ADD_ADDR;
++ opts->add_addr6.addr_id = mptcp_local->locaddr6[ind].loc6_id;
++ opts->add_addr6.addr = mptcp_local->locaddr6[ind].addr;
++ opts->add_addr_v6 = 1;
++
++ if (skb) {
++ fmp->announced_addrs_v6 |= (1 << ind);
++ fmp->add_addr--;
++ }
++ *size += MPTCP_SUB_LEN_ADD_ADDR6_ALIGN;
++ }
++
++ rcu_read_unlock();
++
++ if (!unannouncedv4 && !unannouncedv6 && skb) {
++ fmp->add_addr--;
++ }
++
++remove_addr:
++ if (likely(!fmp->remove_addrs))
++ return;
++
++ remove_addr_len = mptcp_sub_len_remove_addr_align(fmp->remove_addrs);
++ if (MAX_TCP_OPTION_SPACE - *size < remove_addr_len)
++ return;
++
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_REMOVE_ADDR;
++ opts->remove_addrs = fmp->remove_addrs;
++ *size += remove_addr_len;
++ if (skb)
++ fmp->remove_addrs = 0;
++}
++
++static int mptcp_fm_init_net(struct net *net)
++{
++ struct mptcp_loc_addr *mptcp_local;
++ struct mptcp_fm_ns *fm_ns;
++
++ fm_ns = kzalloc(sizeof(*fm_ns), GFP_KERNEL);
++ if (!fm_ns)
++ return -ENOBUFS;
++
++ mptcp_local = kzalloc(sizeof(*mptcp_local), GFP_KERNEL);
++ if (!mptcp_local) {
++ kfree(fm_ns);
++ return -ENOBUFS;
++ }
++
++ mptcp_local->next_v4_index = 1;
++
++ rcu_assign_pointer(fm_ns->local, mptcp_local);
++ INIT_DELAYED_WORK(&fm_ns->address_worker, mptcp_address_worker);
++ INIT_LIST_HEAD(&fm_ns->events);
++ spin_lock_init(&fm_ns->local_lock);
++ fm_ns->net = net;
++ net->mptcp.path_managers[MPTCP_PM_FULLMESH] = fm_ns;
++
++ return 0;
++}
++
++static void mptcp_fm_exit_net(struct net *net)
++{
++ struct mptcp_addr_event *eventq, *tmp;
++ struct mptcp_fm_ns *fm_ns;
++ struct mptcp_loc_addr *mptcp_local;
++
++ fm_ns = fm_get_ns(net);
++ cancel_delayed_work_sync(&fm_ns->address_worker);
++
++ rcu_read_lock_bh();
++
++ mptcp_local = rcu_dereference_bh(fm_ns->local);
++ kfree(mptcp_local);
++
++ spin_lock(&fm_ns->local_lock);
++ list_for_each_entry_safe(eventq, tmp, &fm_ns->events, list) {
++ list_del(&eventq->list);
++ kfree(eventq);
++ }
++ spin_unlock(&fm_ns->local_lock);
++
++ rcu_read_unlock_bh();
++
++ kfree(fm_ns);
++}
++
++static struct pernet_operations full_mesh_net_ops = {
++ .init = mptcp_fm_init_net,
++ .exit = mptcp_fm_exit_net,
++};
++
++static struct mptcp_pm_ops full_mesh __read_mostly = {
++ .new_session = full_mesh_new_session,
++ .release_sock = full_mesh_release_sock,
++ .fully_established = full_mesh_create_subflows,
++ .new_remote_address = full_mesh_create_subflows,
++ .get_local_index = full_mesh_get_local_index,
++ .get_local_id = full_mesh_get_local_id,
++ .addr_signal = full_mesh_addr_signal,
++ .name = "fullmesh",
++ .owner = THIS_MODULE,
++};
++
++/* General initialization of MPTCP_PM */
++static int __init full_mesh_register(void)
++{
++ int ret;
++
++ BUILD_BUG_ON(sizeof(struct fullmesh_priv) > MPTCP_PM_SIZE);
++
++ ret = register_pernet_subsys(&full_mesh_net_ops);
++ if (ret)
++ goto out;
++
++ ret = register_inetaddr_notifier(&mptcp_pm_inetaddr_notifier);
++ if (ret)
++ goto err_reg_inetaddr;
++ ret = register_netdevice_notifier(&mptcp_pm_netdev_notifier);
++ if (ret)
++ goto err_reg_netdev;
++
++#if IS_ENABLED(CONFIG_IPV6)
++ ret = register_inet6addr_notifier(&inet6_addr_notifier);
++ if (ret)
++ goto err_reg_inet6addr;
++#endif
++
++ ret = mptcp_register_path_manager(&full_mesh);
++ if (ret)
++ goto err_reg_pm;
++
++out:
++ return ret;
++
++err_reg_pm:
++#if IS_ENABLED(CONFIG_IPV6)
++ unregister_inet6addr_notifier(&inet6_addr_notifier);
++err_reg_inet6addr:
++#endif
++ unregister_netdevice_notifier(&mptcp_pm_netdev_notifier);
++err_reg_netdev:
++ unregister_inetaddr_notifier(&mptcp_pm_inetaddr_notifier);
++err_reg_inetaddr:
++ unregister_pernet_subsys(&full_mesh_net_ops);
++ goto out;
++}
++
++static void full_mesh_unregister(void)
++{
++#if IS_ENABLED(CONFIG_IPV6)
++ unregister_inet6addr_notifier(&inet6_addr_notifier);
++#endif
++ unregister_netdevice_notifier(&mptcp_pm_netdev_notifier);
++ unregister_inetaddr_notifier(&mptcp_pm_inetaddr_notifier);
++ unregister_pernet_subsys(&full_mesh_net_ops);
++ mptcp_unregister_path_manager(&full_mesh);
++}
++
++module_init(full_mesh_register);
++module_exit(full_mesh_unregister);
++
++MODULE_AUTHOR("Christoph Paasch");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Full-Mesh MPTCP");
++MODULE_VERSION("0.88");
+diff --git a/net/mptcp/mptcp_input.c b/net/mptcp/mptcp_input.c
+new file mode 100644
+index 0000000..f3c9057
+--- /dev/null
++++ b/net/mptcp/mptcp_input.c
+@@ -0,0 +1,2254 @@
++/*
++ * MPTCP implementation - Sending side
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <asm/unaligned.h>
++
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++#include <net/mptcp_v6.h>
++
++#include <linux/kconfig.h>
++
++/* is seq1 < seq2 ? */
++static inline int before64(const u64 seq1, const u64 seq2)
++{
++ return (s64)(seq1 - seq2) < 0;
++}
++
++/* is seq1 > seq2 ? */
++#define after64(seq1, seq2) before64(seq2, seq1)
++
++static inline void mptcp_become_fully_estab(struct sock *sk)
++{
++ tcp_sk(sk)->mptcp->fully_established = 1;
++
++ if (is_master_tp(tcp_sk(sk)) &&
++ tcp_sk(sk)->mpcb->pm_ops->fully_established)
++ tcp_sk(sk)->mpcb->pm_ops->fully_established(mptcp_meta_sk(sk));
++}
++
++/* Similar to tcp_tso_acked without any memory accounting */
++static inline int mptcp_tso_acked_reinject(struct sock *sk, struct sk_buff *skb)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ u32 packets_acked, len;
++
++ BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
++
++ packets_acked = tcp_skb_pcount(skb);
++
++ if (skb_unclone(skb, GFP_ATOMIC))
++ return 0;
++
++ len = tp->snd_una - TCP_SKB_CB(skb)->seq;
++ __pskb_trim_head(skb, len);
++
++ TCP_SKB_CB(skb)->seq += len;
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ skb->truesize -= len;
++
++ /* Any change of skb->len requires recalculation of tso factor. */
++ if (tcp_skb_pcount(skb) > 1)
++ tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
++ packets_acked -= tcp_skb_pcount(skb);
++
++ if (packets_acked) {
++ BUG_ON(tcp_skb_pcount(skb) == 0);
++ BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
++ }
++
++ return packets_acked;
++}
++
++/**
++ * Cleans the meta-socket retransmission queue and the reinject-queue.
++ * @meta_sk must be the meta-socket.
++ */
++static void mptcp_clean_rtx_queue(struct sock *meta_sk, u32 prior_snd_una)
++{
++ struct sk_buff *skb, *tmp;
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ bool acked = false;
++ u32 acked_pcount;
++
++ while ((skb = tcp_write_queue_head(meta_sk)) &&
++ skb != tcp_send_head(meta_sk)) {
++ bool fully_acked = true;
++
++ if (before(meta_tp->snd_una, TCP_SKB_CB(skb)->end_seq)) {
++ if (tcp_skb_pcount(skb) == 1 ||
++ !after(meta_tp->snd_una, TCP_SKB_CB(skb)->seq))
++ break;
++
++ acked_pcount = tcp_tso_acked(meta_sk, skb);
++ if (!acked_pcount)
++ break;
++
++ fully_acked = false;
++ } else {
++ acked_pcount = tcp_skb_pcount(skb);
++ }
++
++ acked = true;
++ meta_tp->packets_out -= acked_pcount;
++ meta_tp->retrans_stamp = 0;
++
++ if (!fully_acked)
++ break;
++
++ tcp_unlink_write_queue(skb, meta_sk);
++
++ if (mptcp_is_data_fin(skb)) {
++ struct sock *sk_it;
++
++ /* DATA_FIN has been acknowledged - now we can close
++ * the subflows
++ */
++ mptcp_for_each_sk(mpcb, sk_it) {
++ unsigned long delay = 0;
++
++ /* If we are the passive closer, don't trigger
++ * subflow-fin until the subflow has been finned
++ * by the peer - thus we add a delay.
++ */
++ if (mpcb->passive_close &&
++ sk_it->sk_state == TCP_ESTABLISHED)
++ delay = inet_csk(sk_it)->icsk_rto << 3;
++
++ mptcp_sub_close(sk_it, delay);
++ }
++ }
++ sk_wmem_free_skb(meta_sk, skb);
++ }
++ /* Remove acknowledged data from the reinject queue */
++ skb_queue_walk_safe(&mpcb->reinject_queue, skb, tmp) {
++ if (before(meta_tp->snd_una, TCP_SKB_CB(skb)->end_seq)) {
++ if (tcp_skb_pcount(skb) == 1 ||
++ !after(meta_tp->snd_una, TCP_SKB_CB(skb)->seq))
++ break;
++
++ mptcp_tso_acked_reinject(meta_sk, skb);
++ break;
++ }
++
++ __skb_unlink(skb, &mpcb->reinject_queue);
++ __kfree_skb(skb);
++ }
++
++ if (likely(between(meta_tp->snd_up, prior_snd_una, meta_tp->snd_una)))
++ meta_tp->snd_up = meta_tp->snd_una;
++
++ if (acked) {
++ tcp_rearm_rto(meta_sk);
++ /* Normally this is done in tcp_try_undo_loss - but MPTCP
++ * does not call this function.
++ */
++ inet_csk(meta_sk)->icsk_retransmits = 0;
++ }
++}
++
++/* Inspired by tcp_rcv_state_process */
++static int mptcp_rcv_state_process(struct sock *meta_sk, struct sock *sk,
++ const struct sk_buff *skb, u32 data_seq,
++ u16 data_len)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk), *tp = tcp_sk(sk);
++ struct tcphdr *th = tcp_hdr(skb);
++
++ /* State-machine handling if a FIN has been enqueued and it has
++ * been acked (snd_una == write_seq) - it is important that this
++ * happens after sk_wmem_free_skb, because otherwise
++ * sk_forward_alloc is wrong upon inet_csk_destroy_sock()
++ */
++ switch (meta_sk->sk_state) {
++ case TCP_FIN_WAIT1:
++ if (meta_tp->snd_una == meta_tp->write_seq) {
++ struct dst_entry *dst = __sk_dst_get(meta_sk);
++
++ tcp_set_state(meta_sk, TCP_FIN_WAIT2);
++ meta_sk->sk_shutdown |= SEND_SHUTDOWN;
++
++ dst = __sk_dst_get(sk);
++ if (dst)
++ dst_confirm(dst);
++
++ if (!sock_flag(meta_sk, SOCK_DEAD)) {
++ /* Wake up lingering close() */
++ meta_sk->sk_state_change(meta_sk);
++ } else {
++ int tmo;
++
++ if (meta_tp->linger2 < 0 ||
++ (data_len &&
++ after(data_seq + data_len - (mptcp_is_data_fin2(skb, tp) ? 1 : 0),
++ meta_tp->rcv_nxt))) {
++ mptcp_send_active_reset(meta_sk, GFP_ATOMIC);
++ tcp_done(meta_sk);
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_TCPABORTONDATA);
++ return 1;
++ }
++
++ tmo = tcp_fin_time(meta_sk);
++ if (tmo > TCP_TIMEWAIT_LEN) {
++ inet_csk_reset_keepalive_timer(meta_sk, tmo - TCP_TIMEWAIT_LEN);
++ } else if (mptcp_is_data_fin2(skb, tp) ||
++ sock_owned_by_user(meta_sk)) {
++ /* Bad case. We could lose such FIN otherwise.
++ * It is not a big problem, but it looks confusing
++ * and not so rare event. We still can lose it now,
++ * if it spins in bh_lock_sock(), but it is really
++ * marginal case.
++ */
++ inet_csk_reset_keepalive_timer(meta_sk, tmo);
++ } else {
++ tcp_time_wait(meta_sk, TCP_FIN_WAIT2, tmo);
++ }
++ }
++ }
++ break;
++ case TCP_CLOSING:
++ case TCP_LAST_ACK:
++ if (meta_tp->snd_una == meta_tp->write_seq) {
++ tcp_done(meta_sk);
++ return 1;
++ }
++ break;
++ }
++
++ /* step 7: process the segment text */
++ switch (meta_sk->sk_state) {
++ case TCP_FIN_WAIT1:
++ case TCP_FIN_WAIT2:
++ /* RFC 793 says to queue data in these states,
++ * RFC 1122 says we MUST send a reset.
++ * BSD 4.4 also does reset.
++ */
++ if (meta_sk->sk_shutdown & RCV_SHUTDOWN) {
++ if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
++ after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt) &&
++ !mptcp_is_data_fin2(skb, tp)) {
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_TCPABORTONDATA);
++ mptcp_send_active_reset(meta_sk, GFP_ATOMIC);
++ tcp_reset(meta_sk);
++ return 1;
++ }
++ }
++ break;
++ }
++
++ return 0;
++}
++
++/**
++ * @return:
++ * i) 1: Everything's fine.
++ * ii) -1: A reset has been sent on the subflow - csum-failure
++ * iii) 0: csum-failure but no reset sent, because it's the last subflow.
++ * Last packet should not be destroyed by the caller because it has
++ * been done here.
++ */
++static int mptcp_verif_dss_csum(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sk_buff *tmp, *tmp1, *last = NULL;
++ __wsum csum_tcp = 0; /* cumulative checksum of pld + mptcp-header */
++ int ans = 1, overflowed = 0, offset = 0, dss_csum_added = 0;
++ int iter = 0;
++
++ skb_queue_walk_safe(&sk->sk_receive_queue, tmp, tmp1) {
++ unsigned int csum_len;
++
++ if (before(tp->mptcp->map_subseq + tp->mptcp->map_data_len, TCP_SKB_CB(tmp)->end_seq))
++ /* Mapping ends in the middle of the packet -
++ * csum only these bytes
++ */
++ csum_len = tp->mptcp->map_subseq + tp->mptcp->map_data_len - TCP_SKB_CB(tmp)->seq;
++ else
++ csum_len = tmp->len;
++
++ offset = 0;
++ if (overflowed) {
++ char first_word[4];
++ first_word[0] = 0;
++ first_word[1] = 0;
++ first_word[2] = 0;
++ first_word[3] = *(tmp->data);
++ csum_tcp = csum_partial(first_word, 4, csum_tcp);
++ offset = 1;
++ csum_len--;
++ overflowed = 0;
++ }
++
++ csum_tcp = skb_checksum(tmp, offset, csum_len, csum_tcp);
++
++ /* Was the length odd? Then we have to merge the next byte
++ * correctly (see above)
++ */
++ if (csum_len != (csum_len & (~1)))
++ overflowed = 1;
++
++ if (mptcp_is_data_seq(tmp) && !dss_csum_added) {
++ __be32 data_seq = htonl((u32)(tp->mptcp->map_data_seq >> 32));
++
++ /* If a 64-bit dss is present, we increase the offset
++ * by 4 bytes, as the high-order 64-bits will be added
++ * in the final csum_partial-call.
++ */
++ u32 offset = skb_transport_offset(tmp) +
++ TCP_SKB_CB(tmp)->dss_off;
++ if (TCP_SKB_CB(tmp)->mptcp_flags & MPTCPHDR_SEQ64_SET)
++ offset += 4;
++
++ csum_tcp = skb_checksum(tmp, offset,
++ MPTCP_SUB_LEN_SEQ_CSUM,
++ csum_tcp);
++
++ csum_tcp = csum_partial(&data_seq,
++ sizeof(data_seq), csum_tcp);
++
++ dss_csum_added = 1; /* Just do it once */
++ }
++ last = tmp;
++ iter++;
++
++ if (!skb_queue_is_last(&sk->sk_receive_queue, tmp) &&
++ !before(TCP_SKB_CB(tmp1)->seq,
++ tp->mptcp->map_subseq + tp->mptcp->map_data_len))
++ break;
++ }
++
++ /* Now, checksum must be 0 */
++ if (unlikely(csum_fold(csum_tcp))) {
++ pr_err("%s csum is wrong: %#x data_seq %u dss_csum_added %d overflowed %d iterations %d\n",
++ __func__, csum_fold(csum_tcp),
++ TCP_SKB_CB(last)->seq, dss_csum_added, overflowed,
++ iter);
++
++ tp->mptcp->send_mp_fail = 1;
++
++ /* map_data_seq is the data-seq number of the
++ * mapping we are currently checking
++ */
++ tp->mpcb->csum_cutoff_seq = tp->mptcp->map_data_seq;
++
++ if (tp->mpcb->cnt_subflows > 1) {
++ mptcp_send_reset(sk);
++ ans = -1;
++ } else {
++ tp->mpcb->send_infinite_mapping = 1;
++
++ /* Need to purge the rcv-queue as it is no longer valid */
++ while ((tmp = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ tp->copied_seq = TCP_SKB_CB(tmp)->end_seq;
++ kfree_skb(tmp);
++ }
++
++ ans = 0;
++ }
++ }
++
++ return ans;
++}
++
++static inline void mptcp_prepare_skb(struct sk_buff *skb, struct sk_buff *next,
++ struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
++ /* Adapt data-seq's to the packet itself. We kinda transform the
++ * dss-mapping to a per-packet granularity. This is necessary to
++ * correctly handle overlapping mappings coming from different
++ * subflows. Otherwise it would be a complete mess.
++ */
++ tcb->seq = ((u32)tp->mptcp->map_data_seq) + tcb->seq - tp->mptcp->map_subseq;
++ tcb->end_seq = tcb->seq + skb->len;
++
++ /* If this skb is the last one in the rcv-queue (or the last one for this
++ * mapping), and a data_fin is enqueued, the end_data_seq is +1.
++ */
++ if (skb_queue_is_last(&sk->sk_receive_queue, skb) ||
++ after(TCP_SKB_CB(next)->end_seq, tp->mptcp->map_subseq + tp->mptcp->map_data_len)) {
++ tcb->end_seq += tp->mptcp->map_data_fin;
++
++ /* We manually set the fin-flag if it is a data-fin. For easy
++ * processing in tcp_recvmsg.
++ */
++ if (mptcp_is_data_fin2(skb, tp))
++ tcp_hdr(skb)->fin = 1;
++ else
++ tcp_hdr(skb)->fin = 0;
++ } else {
++ /* We may have a subflow-fin with data but without data-fin */
++ tcp_hdr(skb)->fin = 0;
++ }
++}
++
++/**
++ * @return: 1 if the segment has been eaten and can be suppressed,
++ * otherwise 0.
++ */
++static inline int mptcp_direct_copy(struct sk_buff *skb, struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ int chunk = min_t(unsigned int, skb->len, meta_tp->ucopy.len);
++ int eaten = 0;
++
++ __set_current_state(TASK_RUNNING);
++
++ local_bh_enable();
++ if (!skb_copy_datagram_iovec(skb, 0, meta_tp->ucopy.iov, chunk)) {
++ meta_tp->ucopy.len -= chunk;
++ meta_tp->copied_seq += chunk;
++ eaten = (chunk == skb->len);
++ tcp_rcv_space_adjust(meta_sk);
++ }
++ local_bh_disable();
++ return eaten;
++}
++
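++/* Forget the currently active DSS-mapping of this subflow. */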
++static inline void mptcp_reset_mapping(struct tcp_sock *tp)
++{
++ tp->mptcp->map_data_len = 0;
++ tp->mptcp->map_data_seq = 0;
++ tp->mptcp->map_subseq = 0;
++ tp->mptcp->map_data_fin = 0;
++ tp->mptcp->mapping_present = 0;
++}
++
++/* The DSS-mapping received on the sk only covers the second half of the skb
++ * (cut at seq). We trim the head from the skb.
++ * Data will be freed upon kfree().
++ *
++ * Inspired by tcp_trim_head().
++ */
++static void mptcp_skb_trim_head(struct sk_buff *skb, struct sock *sk, u32 seq)
++{
++ int len = seq - TCP_SKB_CB(skb)->seq;
++ u32 new_seq = TCP_SKB_CB(skb)->seq + len;
++
++ if (len < skb_headlen(skb))
++ __skb_pull(skb, len);
++ else
++ __pskb_trim_head(skb, len - skb_headlen(skb));
++
++ TCP_SKB_CB(skb)->seq = new_seq;
++
++ skb->truesize -= len;
++ atomic_sub(len, &sk->sk_rmem_alloc);
++ sk_mem_uncharge(sk, len);
++}
++
++/* The DSS-mapping received on the sk only covers the first half of the skb
++ * (cut at seq). We create a second skb (@return), and queue it in the rcv-queue
++ * as further packets may resolve the mapping of the second half of data.
++ *
++ * Inspired by tcp_fragment().
++ */
++static int mptcp_skb_split_tail(struct sk_buff *skb, struct sock *sk, u32 seq)
++{
++ struct sk_buff *buff;
++ int nsize;
++ int nlen, len;
++
++ len = seq - TCP_SKB_CB(skb)->seq;
++ nsize = skb_headlen(skb) - len + tcp_sk(sk)->tcp_header_len;
++ if (nsize < 0)
++ nsize = 0;
++
++ /* Get a new skb... force flag on. */
++ buff = alloc_skb(nsize, GFP_ATOMIC);
++ if (buff == NULL)
++ return -ENOMEM;
++
++ skb_reserve(buff, tcp_sk(sk)->tcp_header_len);
++ skb_reset_transport_header(buff);
++
++ tcp_hdr(buff)->fin = tcp_hdr(skb)->fin;
++ tcp_hdr(skb)->fin = 0;
++
++ /* We absolutely need to call skb_set_owner_r before refreshing the
++ * truesize of buff, otherwise the moved data will be accounted twice.
++ */
++ skb_set_owner_r(buff, sk);
++ nlen = skb->len - len - nsize;
++ buff->truesize += nlen;
++ skb->truesize -= nlen;
++
++ /* Correct the sequence numbers. */
++ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
++ TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
++ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
++
++ skb_split(skb, buff, len);
++
++ __skb_queue_after(&sk->sk_receive_queue, skb, buff);
++
++ return 0;
++}
++
++/* @return: 0 everything is fine. Just continue processing
++ * 1 subflow is broken stop everything
++ * -1 this packet was broken - continue with the next one.
++ */
++static int mptcp_prevalidate_skb(struct sock *sk, struct sk_buff *skb)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ /* If we are in infinite mode, the subflow-fin is in fact a data-fin. */
++ if (!skb->len && tcp_hdr(skb)->fin && !mptcp_is_data_fin(skb) &&
++ !tp->mpcb->infinite_mapping_rcv) {
++ /* Remove a pure subflow-fin from the queue and increase
++ * copied_seq.
++ */
++ tp->copied_seq = TCP_SKB_CB(skb)->end_seq;
++ __skb_unlink(skb, &sk->sk_receive_queue);
++ __kfree_skb(skb);
++ return -1;
++ }
++
++ /* If we are not yet fully established and do not know the mapping for
++ * this segment, this path has to fall back to infinite or be torn down.
++ */
++ if (!tp->mptcp->fully_established && !mptcp_is_data_seq(skb) &&
++ !tp->mptcp->mapping_present && !tp->mpcb->infinite_mapping_rcv) {
++ pr_err("%s %#x will fallback - pi %d from %pS, seq %u\n",
++ __func__, tp->mpcb->mptcp_loc_token,
++ tp->mptcp->path_index, __builtin_return_address(0),
++ TCP_SKB_CB(skb)->seq);
++
++ if (!is_master_tp(tp)) {
++ mptcp_send_reset(sk);
++ return 1;
++ }
++
++ tp->mpcb->infinite_mapping_snd = 1;
++ tp->mpcb->infinite_mapping_rcv = 1;
++ tp->mptcp->fully_established = 1;
++ }
++
++ /* Receiver-side becomes fully established when a whole rcv-window has
++ * been received without the need to fall back due to the previous
++ * condition.
++ */
++ if (!tp->mptcp->fully_established) {
++ tp->mptcp->init_rcv_wnd -= skb->len;
++ if (tp->mptcp->init_rcv_wnd < 0)
++ mptcp_become_fully_estab(sk);
++ }
++
++ return 0;
++}
++
++/* @return: 0 everything is fine. Just continue processing
++ * 1 subflow is broken stop everything
++ * -1 this packet was broken - continue with the next one.
++ */
++static int mptcp_detect_mapping(struct sock *sk, struct sk_buff *skb)
++{
++ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = mptcp_meta_tp(tp);
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
++ u32 *ptr;
++ u32 data_seq, sub_seq, data_len, tcp_end_seq;
++
++ /* If we are in infinite-mapping-mode, the subflow is guaranteed to be
++ * in-order at the data-level. Thus data-seq-numbers can be inferred
++ * from what is expected at the data-level.
++ */
++ if (mpcb->infinite_mapping_rcv) {
++ tp->mptcp->map_data_seq = mptcp_get_rcv_nxt_64(meta_tp);
++ tp->mptcp->map_subseq = tcb->seq;
++ tp->mptcp->map_data_len = skb->len;
++ tp->mptcp->map_data_fin = tcp_hdr(skb)->fin;
++ tp->mptcp->mapping_present = 1;
++ return 0;
++ }
++
++ /* No mapping here? Exit - it is either already set or still on its way */
++ if (!mptcp_is_data_seq(skb)) {
++ /* Too many packets without a mapping - this subflow is broken */
++ if (!tp->mptcp->mapping_present &&
++ tp->rcv_nxt - tp->copied_seq > 65536) {
++ mptcp_send_reset(sk);
++ return 1;
++ }
++
++ return 0;
++ }
++
++ ptr = mptcp_skb_set_data_seq(skb, &data_seq, mpcb);
++ ptr++;
++ sub_seq = get_unaligned_be32(ptr) + tp->mptcp->rcv_isn;
++ ptr++;
++ data_len = get_unaligned_be16(ptr);
++
++ /* If it's an empty skb with DATA_FIN, sub_seq must get fixed.
++ * The draft sets it to 0, but we really would like to have the
++ * real value, to have an easy handling afterwards here in this
++ * function.
++ */
++ if (mptcp_is_data_fin(skb) && skb->len == 0)
++ sub_seq = TCP_SKB_CB(skb)->seq;
++
++ /* If there is already a mapping - we check if it maps with the current
++ * one. If not - we reset.
++ */
++ if (tp->mptcp->mapping_present &&
++ (data_seq != (u32)tp->mptcp->map_data_seq ||
++ sub_seq != tp->mptcp->map_subseq ||
++ data_len != tp->mptcp->map_data_len + tp->mptcp->map_data_fin ||
++ mptcp_is_data_fin(skb) != tp->mptcp->map_data_fin)) {
++ /* Mapping in packet is different from what we want */
++ pr_err("%s Mappings do not match!\n", __func__);
++ pr_err("%s dseq %u mdseq %u, sseq %u msseq %u dlen %u mdlen %u dfin %d mdfin %d\n",
++ __func__, data_seq, (u32)tp->mptcp->map_data_seq,
++ sub_seq, tp->mptcp->map_subseq, data_len,
++ tp->mptcp->map_data_len, mptcp_is_data_fin(skb),
++ tp->mptcp->map_data_fin);
++ mptcp_send_reset(sk);
++ return 1;
++ }
++
++ /* If the previous check was good, the current mapping is valid and we exit. */
++ if (tp->mptcp->mapping_present)
++ return 0;
++
++ /* Mapping not yet set on this subflow - we set it here! */
++
++ if (!data_len) {
++ mpcb->infinite_mapping_rcv = 1;
++ tp->mptcp->fully_established = 1;
++ /* We need to repeat mp_fail's until the sender fell
++ * back to infinite-mapping - here we stop repeating it.
++ */
++ tp->mptcp->send_mp_fail = 0;
++
++ /* We have to fixup data_len - it must be the same as skb->len */
++ data_len = skb->len + (mptcp_is_data_fin(skb) ? 1 : 0);
++ sub_seq = tcb->seq;
++
++ /* TODO kill all other subflows than this one */
++ /* data_seq and so on are set correctly */
++
++ /* At this point, the meta-ofo-queue has to be emptied,
++ * as the following data is guaranteed to be in-order at
++ * the data and subflow-level
++ */
++ mptcp_purge_ofo_queue(meta_tp);
++ }
++
++ /* We are sending mp-fail's and thus are in fallback mode.
++ * Ignore packets which do not announce the fallback and still
++ * want to provide a mapping.
++ */
++ if (tp->mptcp->send_mp_fail) {
++ tp->copied_seq = TCP_SKB_CB(skb)->end_seq;
++ __skb_unlink(skb, &sk->sk_receive_queue);
++ __kfree_skb(skb);
++ return -1;
++ }
++
++ /* FIN increased the mapping-length by 1 */
++ if (mptcp_is_data_fin(skb))
++ data_len--;
++
++ /* Subflow-sequences of packet must be
++ * (at least partially) be part of the DSS-mapping's
++ * subflow-sequence-space.
++ *
++ * Basically the mapping is not valid, if either of the
++ * following conditions is true:
++ *
++ * 1. It's not a data_fin and
++ * MPTCP-sub_seq >= TCP-end_seq
++ *
++ * 2. It's a data_fin and TCP-end_seq > TCP-seq and
++ * MPTCP-sub_seq >= TCP-end_seq
++ *
++ * The previous two can be merged into:
++ * TCP-end_seq > TCP-seq and MPTCP-sub_seq >= TCP-end_seq
++ * Because if it's not a data-fin, TCP-end_seq > TCP-seq
++ *
++ * 3. It's a data_fin and skb->len == 0 and
++ * MPTCP-sub_seq > TCP-end_seq
++ *
++ * 4. It's not a data_fin and TCP-end_seq > TCP-seq and
++ * MPTCP-sub_seq + MPTCP-data_len <= TCP-seq
++ *
++ * 5. MPTCP-sub_seq is prior to what we already copied (copied_seq)
++ */
++
++ /* subflow-fin is not part of the mapping - ignore it here ! */
++ tcp_end_seq = tcb->end_seq - tcp_hdr(skb)->fin;
++ if ((!before(sub_seq, tcb->end_seq) && after(tcp_end_seq, tcb->seq)) ||
++ (mptcp_is_data_fin(skb) && skb->len == 0 && after(sub_seq, tcb->end_seq)) ||
++ (!after(sub_seq + data_len, tcb->seq) && after(tcp_end_seq, tcb->seq)) ||
++ before(sub_seq, tp->copied_seq)) {
++ /* Subflow-sequences of packet is different from what is in the
++ * packet's dss-mapping. The peer is misbehaving - reset
++ */
++ pr_err("%s Packet's mapping does not map to the DSS sub_seq %u "
++ "end_seq %u, tcp_end_seq %u seq %u dfin %u len %u data_len %u"
++ "copied_seq %u\n", __func__, sub_seq, tcb->end_seq, tcp_end_seq, tcb->seq, mptcp_is_data_fin(skb),
++ skb->len, data_len, tp->copied_seq);
++ mptcp_send_reset(sk);
++ return 1;
++ }
++
++ /* Does the DSS have 64-bit sequence numbers? */
++ if (!(tcb->mptcp_flags & MPTCPHDR_SEQ64_SET)) {
++ /* Wrapped around? */
++ if (unlikely(after(data_seq, meta_tp->rcv_nxt) && data_seq < meta_tp->rcv_nxt)) {
++ tp->mptcp->map_data_seq = mptcp_get_data_seq_64(mpcb, !mpcb->rcv_hiseq_index, data_seq);
++ } else {
++ /* Else, access the default high-order bits */
++ tp->mptcp->map_data_seq = mptcp_get_data_seq_64(mpcb, mpcb->rcv_hiseq_index, data_seq);
++ }
++ } else {
++ tp->mptcp->map_data_seq = mptcp_get_data_seq_64(mpcb, (tcb->mptcp_flags & MPTCPHDR_SEQ64_INDEX) ? 1 : 0, data_seq);
++
++ if (unlikely(tcb->mptcp_flags & MPTCPHDR_SEQ64_OFO)) {
++ /* We make sure that the data_seq is invalid.
++ * It will be dropped later.
++ */
++ tp->mptcp->map_data_seq += 0xFFFFFFFF;
++ tp->mptcp->map_data_seq += 0xFFFFFFFF;
++ }
++ }
++
++ tp->mptcp->map_data_len = data_len;
++ tp->mptcp->map_subseq = sub_seq;
++ tp->mptcp->map_data_fin = mptcp_is_data_fin(skb) ? 1 : 0;
++ tp->mptcp->mapping_present = 1;
++
++ return 0;
++}
++
++/* Similar to tcp_sequence(...) */
++static inline int mptcp_sequence(const struct tcp_sock *meta_tp,
++ u64 data_seq, u64 end_data_seq)
++{
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ u64 rcv_wup64;
++
++ /* Wrap-around? */
++ if (meta_tp->rcv_wup > meta_tp->rcv_nxt) {
++ rcv_wup64 = ((u64)(mpcb->rcv_high_order[mpcb->rcv_hiseq_index] - 1) << 32) |
++ meta_tp->rcv_wup;
++ } else {
++ rcv_wup64 = mptcp_get_data_seq_64(mpcb, mpcb->rcv_hiseq_index,
++ meta_tp->rcv_wup);
++ }
++
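++ /* In-window if the mapping ends at or after rcv_wup and does not
++ * start beyond the right edge of the receive window.
++ */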
++ return !before64(end_data_seq, rcv_wup64) &&
++ !after64(data_seq, mptcp_get_rcv_nxt_64(meta_tp) + tcp_receive_window(meta_tp));
++}
++
++/* @return: 0 everything is fine. Just continue processing
++ * -1 this packet was broken - continue with the next one.
++ */
++static int mptcp_validate_mapping(struct sock *sk, struct sk_buff *skb)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sk_buff *tmp, *tmp1;
++ u32 tcp_end_seq;
++
++ if (!tp->mptcp->mapping_present)
++ return 0;
++
++ /* Either the new skb gave us the mapping, and the first segment
++ * in the sub-rcv-queue has to be trimmed ...
++ */
++ tmp = skb_peek(&sk->sk_receive_queue);
++ if (before(TCP_SKB_CB(tmp)->seq, tp->mptcp->map_subseq) &&
++ after(TCP_SKB_CB(tmp)->end_seq, tp->mptcp->map_subseq))
++ mptcp_skb_trim_head(tmp, sk, tp->mptcp->map_subseq);
++
++ /* ... or the new skb (tail) has to be split at the end. */
++ tcp_end_seq = TCP_SKB_CB(skb)->end_seq - (tcp_hdr(skb)->fin ? 1 : 0);
++ if (after(tcp_end_seq, tp->mptcp->map_subseq + tp->mptcp->map_data_len)) {
++ u32 seq = tp->mptcp->map_subseq + tp->mptcp->map_data_len;
++ if (mptcp_skb_split_tail(skb, sk, seq)) { /* Allocation failed */
++ /* TODO: maybe handle this better here.
++ * For now we just force a meta-retransmission.
++ */
++ tp->copied_seq = TCP_SKB_CB(skb)->end_seq;
++ __skb_unlink(skb, &sk->sk_receive_queue);
++ __kfree_skb(skb);
++ return -1;
++ }
++ }
++
++ /* Now, remove old sk_buff's from the receive-queue.
++ * This may happen if the mapping has been lost for these segments and
++ * the next mapping has already been received.
++ */
++ if (tp->mptcp->mapping_present &&
++ before(TCP_SKB_CB(skb_peek(&sk->sk_receive_queue))->seq, tp->mptcp->map_subseq)) {
++ skb_queue_walk_safe(&sk->sk_receive_queue, tmp1, tmp) {
++ if (!before(TCP_SKB_CB(tmp1)->seq, tp->mptcp->map_subseq))
++ break;
++
++ tp->copied_seq = TCP_SKB_CB(tmp1)->end_seq;
++ __skb_unlink(tmp1, &sk->sk_receive_queue);
++
++ /* It is impossible that we could free the skb here, because its
++ * mapping is known to be valid from the previous checks
++ */
++ __kfree_skb(tmp1);
++ }
++ }
++
++ return 0;
++}
++
++/* @return: 0 everything is fine. Just continue processing
++ * 1 subflow is broken stop everything
++ * -1 this mapping has been put in the meta-receive-queue
++ * -2 this mapping has been eaten by the application
++ */
++static int mptcp_queue_skb(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = mptcp_meta_tp(tp);
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct sk_buff *tmp, *tmp1;
++ u64 rcv_nxt64 = mptcp_get_rcv_nxt_64(meta_tp);
++ bool data_queued = false;
++
++ /* Have we not yet received the full mapping? */
++ if (!tp->mptcp->mapping_present ||
++ before(tp->rcv_nxt, tp->mptcp->map_subseq + tp->mptcp->map_data_len))
++ return 0;
++
++ /* Is this an overlapping mapping? rcv_nxt >= end_data_seq
++ * OR
++ * This mapping is out of window
++ */
++ if (!before64(rcv_nxt64, tp->mptcp->map_data_seq + tp->mptcp->map_data_len + tp->mptcp->map_data_fin) ||
++ !mptcp_sequence(meta_tp, tp->mptcp->map_data_seq,
++ tp->mptcp->map_data_seq + tp->mptcp->map_data_len + tp->mptcp->map_data_fin)) {
++ skb_queue_walk_safe(&sk->sk_receive_queue, tmp1, tmp) {
++ __skb_unlink(tmp1, &sk->sk_receive_queue);
++ tp->copied_seq = TCP_SKB_CB(tmp1)->end_seq;
++ __kfree_skb(tmp1);
++
++ if (!skb_queue_empty(&sk->sk_receive_queue) &&
++ !before(TCP_SKB_CB(tmp)->seq,
++ tp->mptcp->map_subseq + tp->mptcp->map_data_len))
++ break;
++ }
++
++ mptcp_reset_mapping(tp);
++
++ return -1;
++ }
++
++ /* Record it, because we want to send our data_fin on the same path */
++ if (tp->mptcp->map_data_fin) {
++ mpcb->dfin_path_index = tp->mptcp->path_index;
++ mpcb->dfin_combined = !!(sk->sk_shutdown & RCV_SHUTDOWN);
++ }
++
++ /* Verify the checksum */
++ if (mpcb->dss_csum && !mpcb->infinite_mapping_rcv) {
++ int ret = mptcp_verif_dss_csum(sk);
++
++ if (ret <= 0) {
++ mptcp_reset_mapping(tp);
++ return 1;
++ }
++ }
++
++ if (before64(rcv_nxt64, tp->mptcp->map_data_seq)) {
++ /* Segments have to go to the meta-ofo-queue */
++ skb_queue_walk_safe(&sk->sk_receive_queue, tmp1, tmp) {
++ tp->copied_seq = TCP_SKB_CB(tmp1)->end_seq;
++ mptcp_prepare_skb(tmp1, tmp, sk);
++ __skb_unlink(tmp1, &sk->sk_receive_queue);
++ /* MUST be done here, because fragstolen may be true later.
++ * Then, kfree_skb_partial will not account for the memory.
++ */
++ skb_orphan(tmp1);
++
++ if (!mpcb->in_time_wait) /* In time-wait, do not receive data */
++ mptcp_add_meta_ofo_queue(meta_sk, tmp1, sk);
++ else
++ __kfree_skb(tmp1);
++
++ if (!skb_queue_empty(&sk->sk_receive_queue) &&
++ !before(TCP_SKB_CB(tmp)->seq,
++ tp->mptcp->map_subseq + tp->mptcp->map_data_len))
++ break;
++
++ }
++ } else {
++ /* Ready for the meta-rcv-queue */
++ skb_queue_walk_safe(&sk->sk_receive_queue, tmp1, tmp) {
++ int eaten = 0;
++ int copied_early = 0;
++ bool fragstolen = false;
++ u32 old_rcv_nxt = meta_tp->rcv_nxt;
++
++ tp->copied_seq = TCP_SKB_CB(tmp1)->end_seq;
++ mptcp_prepare_skb(tmp1, tmp, sk);
++ __skb_unlink(tmp1, &sk->sk_receive_queue);
++ /* MUST be done here, because fragstolen may be true.
++ * Then, kfree_skb_partial will not account for the memory.
++ */
++ skb_orphan(tmp1);
++
++ /* This segment has already been received */
++ if (!after(TCP_SKB_CB(tmp1)->end_seq, meta_tp->rcv_nxt)) {
++ __kfree_skb(tmp1);
++ goto next;
++ }
++
++#ifdef CONFIG_NET_DMA
++ if (TCP_SKB_CB(tmp1)->seq == meta_tp->rcv_nxt &&
++ meta_tp->ucopy.task == current &&
++ meta_tp->copied_seq == meta_tp->rcv_nxt &&
++ tmp1->len <= meta_tp->ucopy.len &&
++ sock_owned_by_user(meta_sk) &&
++ tcp_dma_try_early_copy(meta_sk, tmp1, 0)) {
++ copied_early = 1;
++ eaten = 1;
++ }
++#endif
++
++ /* Is direct copy possible? */
++ if (TCP_SKB_CB(tmp1)->seq == meta_tp->rcv_nxt &&
++ meta_tp->ucopy.task == current &&
++ meta_tp->copied_seq == meta_tp->rcv_nxt &&
++ meta_tp->ucopy.len && sock_owned_by_user(meta_sk) &&
++ !copied_early)
++ eaten = mptcp_direct_copy(tmp1, meta_sk);
++
++ if (mpcb->in_time_wait) /* In time-wait, do not receive data */
++ eaten = 1;
++
++ if (!eaten)
++ eaten = tcp_queue_rcv(meta_sk, tmp1, 0, &fragstolen);
++
++ meta_tp->rcv_nxt = TCP_SKB_CB(tmp1)->end_seq;
++ mptcp_check_rcvseq_wrap(meta_tp, old_rcv_nxt);
++
++ if (copied_early)
++ tcp_cleanup_rbuf(meta_sk, tmp1->len);
++
++ if (tcp_hdr(tmp1)->fin && !mpcb->in_time_wait)
++ mptcp_fin(meta_sk);
++
++ /* Check if this fills a gap in the ofo queue */
++ if (!skb_queue_empty(&meta_tp->out_of_order_queue))
++ mptcp_ofo_queue(meta_sk);
++
++#ifdef CONFIG_NET_DMA
++ if (copied_early)
++ __skb_queue_tail(&meta_sk->sk_async_wait_queue,
++ tmp1);
++ else
++#endif
++ if (eaten)
++ kfree_skb_partial(tmp1, fragstolen);
++
++ data_queued = true;
++next:
++ if (!skb_queue_empty(&sk->sk_receive_queue) &&
++ !before(TCP_SKB_CB(tmp)->seq,
++ tp->mptcp->map_subseq + tp->mptcp->map_data_len))
++ break;
++ }
++ }
++
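++ /* The mapping has been fully passed to the meta-level. Remember its
++ * data_seq and clear the per-subflow mapping state.
++ */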
++ inet_csk(meta_sk)->icsk_ack.lrcvtime = tcp_time_stamp;
++ tp->mptcp->last_data_seq = tp->mptcp->map_data_seq;
++ mptcp_reset_mapping(tp);
++
++ return data_queued ? -1 : -2;
++}
++
++void mptcp_data_ready(struct sock *sk, int bytes)
++{
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct sk_buff *skb, *tmp;
++ int queued = 0;
++
++ /* If the meta is already closed, there is no point in pushing data */
++ if (meta_sk->sk_state == TCP_CLOSE && !tcp_sk(sk)->mpcb->in_time_wait) {
++ skb_queue_purge(&sk->sk_receive_queue);
++ tcp_sk(sk)->copied_seq = tcp_sk(sk)->rcv_nxt;
++ goto exit;
++ }
++
++restart:
++ /* Iterate over all segments, detect their mapping (if we don't have
++ * one yet), validate them and push everything one level higher.
++ */
++ skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
++ int ret;
++ /* Pre-validation - e.g., early fallback */
++ ret = mptcp_prevalidate_skb(sk, skb);
++ if (ret < 0)
++ goto restart;
++ else if (ret > 0)
++ break;
++
++ /* Set the current mapping */
++ ret = mptcp_detect_mapping(sk, skb);
++ if (ret < 0)
++ goto restart;
++ else if (ret > 0)
++ break;
++
++ /* Validation */
++ if (mptcp_validate_mapping(sk, skb) < 0)
++ goto restart;
++
++ /* Push a level higher */
++ ret = mptcp_queue_skb(sk);
++ if (ret < 0) {
++ if (ret == -1)
++ queued = ret;
++ goto restart;
++ } else if (ret == 0) {
++ continue;
++ } else { /* ret == 1 */
++ break;
++ }
++ }
++
++exit:
++ if (tcp_sk(sk)->close_it) {
++ tcp_send_ack(sk);
++ tcp_time_wait(sk, TCP_TIME_WAIT, 0);
++ }
++
++ if (queued == -1 && !sock_flag(meta_sk, SOCK_DEAD))
++ meta_sk->sk_data_ready(meta_sk, 0);
++}
++
++
++int mptcp_check_req(struct sk_buff *skb, struct net *net)
++{
++ struct tcphdr *th = tcp_hdr(skb);
++ struct sock *meta_sk = NULL;
++
++ /* MPTCP structures not initialized */
++ if (mptcp_init_failed)
++ return 0;
++
++ if (skb->protocol == htons(ETH_P_IP))
++ meta_sk = mptcp_v4_search_req(th->source, ip_hdr(skb)->saddr,
++ ip_hdr(skb)->daddr, net);
++#if IS_ENABLED(CONFIG_IPV6)
++ else /* IPv6 */
++ meta_sk = mptcp_v6_search_req(th->source, &ipv6_hdr(skb)->saddr,
++ &ipv6_hdr(skb)->daddr, net);
++#endif /* CONFIG_IPV6 */
++
++ if (!meta_sk)
++ return 0;
++
++ TCP_SKB_CB(skb)->mptcp_flags = MPTCPHDR_JOIN;
++
++ bh_lock_sock_nested(meta_sk);
++ if (sock_owned_by_user(meta_sk)) {
++ skb->sk = meta_sk;
++ if (unlikely(sk_add_backlog(meta_sk, skb,
++ meta_sk->sk_rcvbuf + meta_sk->sk_sndbuf))) {
++ bh_unlock_sock(meta_sk);
++ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
++ sock_put(meta_sk); /* Taken by mptcp_search_req */
++ kfree_skb(skb);
++ return 1;
++ }
++ } else if (skb->protocol == htons(ETH_P_IP)) {
++ tcp_v4_do_rcv(meta_sk, skb);
++#if IS_ENABLED(CONFIG_IPV6)
++ } else { /* IPv6 */
++ tcp_v6_do_rcv(meta_sk, skb);
++#endif /* CONFIG_IPV6 */
++ }
++ bh_unlock_sock(meta_sk);
++ sock_put(meta_sk); /* Taken by mptcp_vX_search_req */
++ return 1;
++}
++
++struct mp_join *mptcp_find_join(struct sk_buff *skb)
++{
++ struct tcphdr *th = tcp_hdr(skb);
++ unsigned char *ptr;
++ int length = (th->doff * 4) - sizeof(struct tcphdr);
++
++ /* Jump through the options to check whether JOIN is there */
++ ptr = (unsigned char *)(th + 1);
++ while (length > 0) {
++ int opcode = *ptr++;
++ int opsize;
++
++ switch (opcode) {
++ case TCPOPT_EOL:
++ return NULL;
++ case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
++ length--;
++ continue;
++ default:
++ opsize = *ptr++;
++ if (opsize < 2) /* "silly options" */
++ return NULL;
++ if (opsize > length)
++ return NULL; /* don't parse partial options */
++ if (opcode == TCPOPT_MPTCP &&
++ ((struct mptcp_option *)(ptr - 2))->sub == MPTCP_SUB_JOIN) {
++ return (struct mp_join *)(ptr - 2);
++ }
++ ptr += opsize - 2;
++ length -= opsize;
++ }
++ }
++ return NULL;
++}
++
++int mptcp_lookup_join(struct sk_buff *skb, struct inet_timewait_sock *tw)
++{
++ struct mptcp_cb *mpcb;
++ struct sock *meta_sk;
++ u32 token;
++ struct mp_join *join_opt = mptcp_find_join(skb);
++ if (!join_opt)
++ return 0;
++
++ /* MPTCP structures were not initialized, so return error */
++ if (mptcp_init_failed)
++ return -1;
++
++ token = join_opt->u.syn.token;
++ meta_sk = mptcp_hash_find(dev_net(skb_dst(skb)->dev), token);
++ if (!meta_sk) {
++ mptcp_debug("%s:mpcb not found:%x\n", __func__, token);
++ return -1;
++ }
++
++ mpcb = tcp_sk(meta_sk)->mpcb;
++ if (mpcb->infinite_mapping_rcv || mpcb->send_infinite_mapping) {
++ /* We are in fallback-mode on the reception-side -
++ * no new subflows!
++ */
++ sock_put(meta_sk); /* Taken by mptcp_hash_find */
++ return -1;
++ }
++
++ /* Coming from time-wait-sock processing in tcp_v4_rcv.
++ * We have to deschedule it before continuing, because otherwise
++ * mptcp_v4_do_rcv will hit it again inside tcp_v4_hnd_req.
++ */
++ if (tw) {
++ inet_twsk_deschedule(tw, &tcp_death_row);
++ inet_twsk_put(tw);
++ }
++
++ TCP_SKB_CB(skb)->mptcp_flags = MPTCPHDR_JOIN;
++ /* OK, this is a new syn/join, let's create a new open request and
++ * send syn+ack
++ */
++ bh_lock_sock_nested(meta_sk);
++ if (sock_owned_by_user(meta_sk)) {
++ skb->sk = meta_sk;
++ if (unlikely(sk_add_backlog(meta_sk, skb,
++ meta_sk->sk_rcvbuf + meta_sk->sk_sndbuf))) {
++ bh_unlock_sock(meta_sk);
++ NET_INC_STATS_BH(sock_net(meta_sk),
++ LINUX_MIB_TCPBACKLOGDROP);
++ sock_put(meta_sk); /* Taken by mptcp_hash_find */
++ kfree_skb(skb);
++ return 1;
++ }
++ } else if (skb->protocol == htons(ETH_P_IP)) {
++ tcp_v4_do_rcv(meta_sk, skb);
++#if IS_ENABLED(CONFIG_IPV6)
++ } else {
++ tcp_v6_do_rcv(meta_sk, skb);
++#endif /* CONFIG_IPV6 */
++ }
++ bh_unlock_sock(meta_sk);
++ sock_put(meta_sk); /* Taken by mptcp_hash_find */
++ return 1;
++}
++
++int mptcp_do_join_short(struct sk_buff *skb, struct mptcp_options_received *mopt,
++ struct tcp_options_received *tmp_opt, struct net *net)
++{
++ struct sock *meta_sk;
++ u32 token;
++
++ token = mopt->mptcp_rem_token;
++ meta_sk = mptcp_hash_find(net, token);
++ if (!meta_sk) {
++ mptcp_debug("%s:mpcb not found:%x\n", __func__, token);
++ return -1;
++ }
++
++ TCP_SKB_CB(skb)->mptcp_flags = MPTCPHDR_JOIN;
++
++ /* OK, this is a new syn/join, let's create a new open request and
++ * send syn+ack
++ */
++ bh_lock_sock(meta_sk);
++
++ /* This check is also done in mptcp_vX_do_rcv. But there we cannot
++ * call tcp_vX_send_reset, because we already hold two socket-locks
++ * (the listener and the meta from above).
++ *
++ * And the send-reset will try to take yet another one (ip_send_reply).
++ * Thus, we propagate the reset up to tcp_rcv_state_process.
++ */
++ if (tcp_sk(meta_sk)->mpcb->infinite_mapping_rcv ||
++ tcp_sk(meta_sk)->mpcb->send_infinite_mapping ||
++ meta_sk->sk_state == TCP_CLOSE || !tcp_sk(meta_sk)->inside_tk_table) {
++ bh_unlock_sock(meta_sk);
++ sock_put(meta_sk); /* Taken by mptcp_hash_find */
++ return -1;
++ }
++
++ if (sock_owned_by_user(meta_sk)) {
++ skb->sk = meta_sk;
++ if (unlikely(sk_add_backlog(meta_sk, skb,
++ meta_sk->sk_rcvbuf + meta_sk->sk_sndbuf)))
++ NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
++ else
++ /* Must make sure that upper layers won't free the
++ * skb if it is added to the backlog-queue.
++ */
++ skb_get(skb);
++ } else {
++ /* mptcp_v4_do_rcv tries to free the skb - we prevent this, as
++ * the skb will finally be freed by tcp_v4_do_rcv (where we are
++ * coming from)
++ */
++ skb_get(skb);
++ if (skb->protocol == htons(ETH_P_IP)) {
++ tcp_v4_do_rcv(meta_sk, skb);
++#if IS_ENABLED(CONFIG_IPV6)
++ } else { /* IPv6 */
++ tcp_v6_do_rcv(meta_sk, skb);
++#endif /* CONFIG_IPV6 */
++ }
++ }
++
++ bh_unlock_sock(meta_sk);
++ sock_put(meta_sk); /* Taken by mptcp_hash_find */
++ return 0;
++}
++
++/**
++ * Equivalent of tcp_fin() for MPTCP
++ * Can be called only when the FIN is validly part
++ * of the data seqnum space. Not before, i.e. not while there are still holes.
++ */
++void mptcp_fin(struct sock *meta_sk)
++{
++ struct sock *sk = NULL, *sk_it;
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++
++ mptcp_for_each_sk(mpcb, sk_it) {
++ if (tcp_sk(sk_it)->mptcp->path_index == mpcb->dfin_path_index) {
++ sk = sk_it;
++ break;
++ }
++ }
++
++ if (!sk || sk->sk_state == TCP_CLOSE)
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++
++ inet_csk_schedule_ack(sk);
++
++ meta_sk->sk_shutdown |= RCV_SHUTDOWN;
++ sock_set_flag(meta_sk, SOCK_DONE);
++
++ switch (meta_sk->sk_state) {
++ case TCP_SYN_RECV:
++ case TCP_ESTABLISHED:
++ /* Move to CLOSE_WAIT */
++ tcp_set_state(meta_sk, TCP_CLOSE_WAIT);
++ inet_csk(sk)->icsk_ack.pingpong = 1;
++ break;
++
++ case TCP_CLOSE_WAIT:
++ case TCP_CLOSING:
++ /* Received a retransmission of the FIN, do
++ * nothing.
++ */
++ break;
++ case TCP_LAST_ACK:
++ /* RFC793: Remain in the LAST-ACK state. */
++ break;
++
++ case TCP_FIN_WAIT1:
++ /* This case occurs when a simultaneous close
++ * happens, we must ack the received FIN and
++ * enter the CLOSING state.
++ */
++ tcp_send_ack(sk);
++ tcp_set_state(meta_sk, TCP_CLOSING);
++ break;
++ case TCP_FIN_WAIT2:
++ /* Received a FIN -- send ACK and enter TIME_WAIT. */
++ tcp_send_ack(sk);
++ tcp_time_wait(meta_sk, TCP_TIME_WAIT, 0);
++ break;
++ default:
++ /* Only TCP_LISTEN and TCP_CLOSE are left, in these
++ * cases we should never reach this piece of code.
++ */
++ pr_err("%s: Impossible, meta_sk->sk_state=%d\n", __func__,
++ meta_sk->sk_state);
++ break;
++ }
++
++ /* It _is_ possible that we have something out-of-order _after_ the FIN.
++ * Probably, we should reset in this case. For now drop them.
++ */
++ mptcp_purge_ofo_queue(meta_tp);
++ sk_mem_reclaim(meta_sk);
++
++ if (!sock_flag(meta_sk, SOCK_DEAD)) {
++ meta_sk->sk_state_change(meta_sk);
++
++ /* Do not send POLL_HUP for half duplex close. */
++ if (meta_sk->sk_shutdown == SHUTDOWN_MASK ||
++ meta_sk->sk_state == TCP_CLOSE)
++ sk_wake_async(meta_sk, SOCK_WAKE_WAITD, POLL_HUP);
++ else
++ sk_wake_async(meta_sk, SOCK_WAKE_WAITD, POLL_IN);
++ }
++
++ return;
++}
++
++static void mptcp_xmit_retransmit_queue(struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sk_buff *skb;
++
++ if (!meta_tp->packets_out)
++ return;
++
++ tcp_for_write_queue(skb, meta_sk) {
++ if (skb == tcp_send_head(meta_sk))
++ break;
++
++ if (mptcp_retransmit_skb(meta_sk, skb))
++ return;
++
++ if (skb == tcp_write_queue_head(meta_sk))
++ inet_csk_reset_xmit_timer(meta_sk, ICSK_TIME_RETRANS,
++ inet_csk(meta_sk)->icsk_rto,
++ TCP_RTO_MAX);
++ }
++}
++
++/* Handle the DATA_ACK */
++static void mptcp_data_ack(struct sock *sk, const struct sk_buff *skb)
++{
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk), *tp = tcp_sk(sk);
++ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
++ u32 prior_snd_una = meta_tp->snd_una;
++ int prior_packets;
++ u32 nwin, data_ack, data_seq;
++ u16 data_len = 0;
++
++ /* A valid packet came in - subflow is operational again */
++ tp->pf = 0;
++
++ /* Even if there is no data-ack, we stop retransmitting.
++ * Except if this is a SYN/ACK. Then it is just a retransmission
++ */
++ if (tp->mptcp->pre_established && !tcp_hdr(skb)->syn) {
++ tp->mptcp->pre_established = 0;
++ sk_stop_timer(sk, &tp->mptcp->mptcp_ack_timer);
++ }
++
++ /* If we are in infinite mapping mode, rx_opt.data_ack has been
++ * set by mptcp_clean_rtx_infinite.
++ */
++ if (!(tcb->mptcp_flags & MPTCPHDR_ACK) && !tp->mpcb->infinite_mapping_snd)
++ goto exit;
++
++ data_ack = tp->mptcp->rx_opt.data_ack;
++
++ if (unlikely(!tp->mptcp->fully_established) &&
++ (data_ack != meta_tp->mptcp->snt_isn ||
++ tp->mptcp->snt_isn + 1 != TCP_SKB_CB(skb)->ack_seq))
++ /* As soon as data has been data-acked,
++ * or a subflow-data-ack (not acking syn - thus snt_isn + 1)
++ * includes a data-ack, we are fully established
++ */
++ mptcp_become_fully_estab(sk);
++
++ /* Get the data_seq */
++ if (mptcp_is_data_seq(skb)) {
++ data_seq = tp->mptcp->rx_opt.data_seq;
++ data_len = tp->mptcp->rx_opt.data_len;
++ } else {
++ data_seq = meta_tp->snd_wl1;
++ }
++
++ /* If the ack is older than previous acks
++ * then we can probably ignore it.
++ */
++ if (before(data_ack, prior_snd_una))
++ goto exit;
++
++ /* If the ack includes data we haven't sent yet, discard
++ * this segment (RFC793 Section 3.9).
++ */
++ if (after(data_ack, meta_tp->snd_nxt))
++ goto exit;
++
++ /*** Now, update the window - inspired by tcp_ack_update_window ***/
++ nwin = ntohs(tcp_hdr(skb)->window);
++
++ if (likely(!tcp_hdr(skb)->syn))
++ nwin <<= tp->rx_opt.snd_wscale;
++
++ if (tcp_may_update_window(meta_tp, data_ack, data_seq, nwin)) {
++ tcp_update_wl(meta_tp, data_seq);
++
++ /* Draft v09, Section 3.3.5:
++ * [...] It should only update its local receive window values
++ * when the largest sequence number allowed (i.e. DATA_ACK +
++ * receive window) increases. [...]
++ */
++ if (meta_tp->snd_wnd != nwin &&
++ !before(data_ack + nwin, tcp_wnd_end(meta_tp))) {
++ meta_tp->snd_wnd = nwin;
++
++ if (nwin > meta_tp->max_window)
++ meta_tp->max_window = nwin;
++ }
++ }
++ /*** Done, update the window ***/
++
++ /* We passed data and got it acked, remove any soft error
++ * log. Something worked...
++ */
++ sk->sk_err_soft = 0;
++ inet_csk(meta_sk)->icsk_probes_out = 0;
++ meta_tp->rcv_tstamp = tcp_time_stamp;
++ prior_packets = meta_tp->packets_out;
++ if (!prior_packets)
++ goto no_queue;
++
++ meta_tp->snd_una = data_ack;
++
++ mptcp_clean_rtx_queue(meta_sk, prior_snd_una);
++
++ /* We are in loss-state, and something got acked, retransmit the whole
++ * queue now!
++ */
++ if (inet_csk(meta_sk)->icsk_ca_state == TCP_CA_Loss &&
++ after(data_ack, prior_snd_una)) {
++ mptcp_xmit_retransmit_queue(meta_sk);
++ inet_csk(meta_sk)->icsk_ca_state = TCP_CA_Open;
++ }
++
++ /* Simplified version of tcp_new_space, because the snd-buffer
++ * is handled by all the subflows.
++ */
++ if (sock_flag(meta_sk, SOCK_QUEUE_SHRUNK)) {
++ sock_reset_flag(meta_sk, SOCK_QUEUE_SHRUNK);
++ if (meta_sk->sk_socket &&
++ test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags))
++ meta_sk->sk_write_space(meta_sk);
++ }
++
++ if (meta_sk->sk_state != TCP_ESTABLISHED &&
++ mptcp_rcv_state_process(meta_sk, sk, skb, data_seq, data_len))
++ return;
++
++exit:
++ mptcp_push_pending_frames(meta_sk);
++
++ return;
++
++no_queue:
++ if (tcp_send_head(meta_sk))
++ tcp_ack_probe(meta_sk);
++
++ mptcp_push_pending_frames(meta_sk);
++
++ return;
++}
++
++void mptcp_clean_rtx_infinite(struct sk_buff *skb, struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = tcp_sk(mptcp_meta_sk(sk));
++
++ if (!tp->mpcb->infinite_mapping_snd)
++ return;
++
++ /* The difference between the two write_seqs represents the offset between
++ * data-sequence and subflow-sequence. As we are in infinite-mapping mode,
++ * this must match.
++ *
++ * Thus, from this difference we can infer the meta snd_una.
++ */
++ tp->mptcp->rx_opt.data_ack = meta_tp->snd_nxt - tp->snd_nxt +
++ tp->snd_una;
++
++ mptcp_data_ack(sk, skb);
++}
++
++/**** static functions used by mptcp_parse_options */
++
++static inline int mptcp_rem_raddress(struct mptcp_cb *mpcb, u8 rem_id)
++{
++ if (mptcp_v4_rem_raddress(mpcb, rem_id) < 0) {
++#if IS_ENABLED(CONFIG_IPV6)
++ if (mptcp_v6_rem_raddress(mpcb, rem_id) < 0)
++ return -1;
++#else
++ return -1;
++#endif /* CONFIG_IPV6 */
++ }
++ return 0;
++}
++
++static void mptcp_send_reset_rem_id(const struct mptcp_cb *mpcb, u8 rem_id)
++{
++ struct sock *sk_it, *tmpsk;
++
++ mptcp_for_each_sk_safe(mpcb, sk_it, tmpsk) {
++ if (tcp_sk(sk_it)->mptcp->rem_id == rem_id) {
++ mptcp_reinject_data(sk_it, 0);
++ sk_it->sk_err = ECONNRESET;
++ if (tcp_need_reset(sk_it->sk_state))
++ tcp_send_active_reset(sk_it, GFP_ATOMIC);
++ mptcp_sub_force_close(sk_it);
++ }
++ }
++}
++
++void mptcp_parse_options(const uint8_t *ptr, int opsize,
++ struct tcp_options_received *opt_rx,
++ struct mptcp_options_received *mopt,
++ const struct sk_buff *skb)
++{
++ struct mptcp_option *mp_opt = (struct mptcp_option *)ptr;
++
++ /* If the socket is mp-capable we would have a mopt. */
++ if (!mopt)
++ return;
++
++ switch (mp_opt->sub) {
++ case MPTCP_SUB_CAPABLE:
++ {
++ struct mp_capable *mpcapable = (struct mp_capable *)ptr;
++
++ if (opsize != MPTCP_SUB_LEN_CAPABLE_SYN &&
++ opsize != MPTCP_SUB_LEN_CAPABLE_ACK) {
++ mptcp_debug("%s: mp_capable: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
++ if (!sysctl_mptcp_enabled)
++ break;
++
++ /* We only support MPTCP version 0 */
++ if (mpcapable->ver != 0)
++ break;
++
++ /* MPTCP-RFC 6824:
++ * "If receiving a message with the 'B' flag set to 1, and this
++ * is not understood, then this SYN MUST be silently ignored;
++ */
++ if (mpcapable->b) {
++ mopt->drop_me = 1;
++ break;
++ }
++
++ /* MPTCP-RFC 6824:
++ * "An implementation that only supports this method MUST set
++ * bit "H" to 1, and bits "C" through "G" to 0."
++ */
++ if (!mpcapable->h)
++ break;
++
++ mopt->saw_mpc = 1;
++ mopt->dss_csum = sysctl_mptcp_checksum || mpcapable->a;
++
++ if (opsize >= MPTCP_SUB_LEN_CAPABLE_SYN)
++ mopt->mptcp_key = mpcapable->sender_key;
++
++ break;
++ }
++ case MPTCP_SUB_JOIN:
++ {
++ struct mp_join *mpjoin = (struct mp_join *)ptr;
++
++ if (opsize != MPTCP_SUB_LEN_JOIN_SYN &&
++ opsize != MPTCP_SUB_LEN_JOIN_SYNACK &&
++ opsize != MPTCP_SUB_LEN_JOIN_ACK) {
++ mptcp_debug("%s: mp_join: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
++ /* saw_mpc must be set, because in tcp_check_req we assume that
++ * it is set to support falling back to regular TCP if a retransmitted
++ * SYN has no MP_CAPABLE or MP_JOIN.
++ */
++ switch (opsize) {
++ case MPTCP_SUB_LEN_JOIN_SYN:
++ mopt->is_mp_join = 1;
++ mopt->saw_mpc = 1;
++ mopt->low_prio = mpjoin->b;
++ mopt->rem_id = mpjoin->addr_id;
++ mopt->mptcp_rem_token = mpjoin->u.syn.token;
++ mopt->mptcp_recv_nonce = mpjoin->u.syn.nonce;
++ break;
++ case MPTCP_SUB_LEN_JOIN_SYNACK:
++ mopt->saw_mpc = 1;
++ mopt->low_prio = mpjoin->b;
++ mopt->rem_id = mpjoin->addr_id;
++ mopt->mptcp_recv_tmac = mpjoin->u.synack.mac;
++ mopt->mptcp_recv_nonce = mpjoin->u.synack.nonce;
++ break;
++ case MPTCP_SUB_LEN_JOIN_ACK:
++ mopt->saw_mpc = 1;
++ mopt->join_ack = 1;
++ memcpy(mopt->mptcp_recv_mac, mpjoin->u.ack.mac, 20);
++ break;
++ }
++ break;
++ }
++ case MPTCP_SUB_DSS:
++ {
++ struct mp_dss *mdss = (struct mp_dss *)ptr;
++ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
++
++ /* We check opsize for the csum and non-csum case. We do this,
++ * because the draft says that the csum SHOULD be ignored if
++ * it has not been negotiated in the MP_CAPABLE but is still
++ * present in the data.
++ *
++ * It will get ignored later in mptcp_queue_skb.
++ */
++ if (opsize != mptcp_sub_len_dss(mdss, 0) &&
++ opsize != mptcp_sub_len_dss(mdss, 1)) {
++ mptcp_debug("%s: mp_dss: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
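++ /* Skip the 4-byte DSS option header (kind, length, subtype and flags) */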
++ ptr += 4;
++
++ if (mdss->A) {
++ tcb->mptcp_flags |= MPTCPHDR_ACK;
++
++ if (mdss->a) {
++ mopt->data_ack = (u32) get_unaligned_be64(ptr);
++ ptr += MPTCP_SUB_LEN_ACK_64;
++ } else {
++ mopt->data_ack = get_unaligned_be32(ptr);
++ ptr += MPTCP_SUB_LEN_ACK;
++ }
++ }
++
++ tcb->dss_off = (ptr - skb_transport_header(skb));
++
++ if (mdss->M) {
++ if (mdss->m) {
++ u64 data_seq64 = get_unaligned_be64(ptr);
++
++ tcb->mptcp_flags |= MPTCPHDR_SEQ64_SET;
++ mopt->data_seq = (u32) data_seq64;
++
++ ptr += 12; /* 64-bit dseq + subseq */
++ } else {
++ mopt->data_seq = get_unaligned_be32(ptr);
++ ptr += 8; /* 32-bit dseq + subseq */
++ }
++ mopt->data_len = get_unaligned_be16(ptr);
++
++ tcb->mptcp_flags |= MPTCPHDR_SEQ;
++
++ /* Is a checksum present? */
++ if (opsize == mptcp_sub_len_dss(mdss, 1))
++ tcb->mptcp_flags |= MPTCPHDR_DSS_CSUM;
++
++ /* DATA_FIN only possible with DSS-mapping */
++ if (mdss->F)
++ tcb->mptcp_flags |= MPTCPHDR_FIN;
++ }
++
++ break;
++ }
++ case MPTCP_SUB_ADD_ADDR:
++ {
++#if IS_ENABLED(CONFIG_IPV6)
++ struct mp_add_addr *mpadd = (struct mp_add_addr *)ptr;
++
++ if ((mpadd->ipver == 4 && opsize != MPTCP_SUB_LEN_ADD_ADDR4 &&
++ opsize != MPTCP_SUB_LEN_ADD_ADDR4 + 2) ||
++ (mpadd->ipver == 6 && opsize != MPTCP_SUB_LEN_ADD_ADDR6 &&
++ opsize != MPTCP_SUB_LEN_ADD_ADDR6 + 2)) {
++#else
++ if (opsize != MPTCP_SUB_LEN_ADD_ADDR4 &&
++ opsize != MPTCP_SUB_LEN_ADD_ADDR4 + 2) {
++#endif /* CONFIG_IPV6 */
++ mptcp_debug("%s: mp_add_addr: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
++ /* We have to manually parse the options if we got two of them. */
++ if (mopt->saw_add_addr) {
++ mopt->more_add_addr = 1;
++ break;
++ }
++ mopt->saw_add_addr = 1;
++ mopt->add_addr_ptr = ptr;
++ break;
++ }
++ case MPTCP_SUB_REMOVE_ADDR:
++ if ((opsize - MPTCP_SUB_LEN_REMOVE_ADDR) < 0) {
++ mptcp_debug("%s: mp_remove_addr: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
++ if (mopt->saw_rem_addr) {
++ mopt->more_rem_addr = 1;
++ break;
++ }
++ mopt->saw_rem_addr = 1;
++ mopt->rem_addr_ptr = ptr;
++ break;
++ case MPTCP_SUB_PRIO:
++ {
++ struct mp_prio *mpprio = (struct mp_prio *)ptr;
++
++ if (opsize != MPTCP_SUB_LEN_PRIO &&
++ opsize != MPTCP_SUB_LEN_PRIO_ADDR) {
++ mptcp_debug("%s: mp_prio: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
++ mopt->saw_low_prio = 1;
++ mopt->low_prio = mpprio->b;
++
++ if (opsize == MPTCP_SUB_LEN_PRIO_ADDR) {
++ mopt->saw_low_prio = 2;
++ mopt->prio_addr_id = mpprio->addr_id;
++ }
++ break;
++ }
++ case MPTCP_SUB_FAIL:
++ if (opsize != MPTCP_SUB_LEN_FAIL) {
++ mptcp_debug("%s: mp_fail: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++ mopt->mp_fail = 1;
++ break;
++ case MPTCP_SUB_FCLOSE:
++ if (opsize != MPTCP_SUB_LEN_FCLOSE) {
++ mptcp_debug("%s: mp_fclose: bad option size %d\n",
++ __func__, opsize);
++ break;
++ }
++
++ mopt->mp_fclose = 1;
++ mopt->mptcp_key = ((struct mp_fclose *)ptr)->key;
++
++ break;
++ default:
++ mptcp_debug("%s: Received unkown subtype: %d\n",
++ __func__, mp_opt->sub);
++ break;
++ }
++}
++
++int mptcp_check_rtt(const struct tcp_sock *tp, int time)
++{
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct sock *sk;
++ u32 rtt_max = 0;
++
++ /* In MPTCP, we take the max delay across all flows,
++ * in order to take into account meta-reordering buffers.
++ */
++ mptcp_for_each_sk(mpcb, sk) {
++ if (!mptcp_sk_can_recv(sk))
++ continue;
++
++ if (rtt_max < tcp_sk(sk)->rcv_rtt_est.rtt)
++ rtt_max = tcp_sk(sk)->rcv_rtt_est.rtt;
++ }
++ if (time < (rtt_max >> 3) || !rtt_max)
++ return 1;
++
++ return 0;
++}
++
++static void mptcp_handle_add_addr(const unsigned char *ptr, struct sock *sk)
++{
++ struct mp_add_addr *mpadd = (struct mp_add_addr *)ptr;
++
++ if (mpadd->ipver == 4) {
++ __be16 port = 0;
++ if (mpadd->len == MPTCP_SUB_LEN_ADD_ADDR4 + 2)
++ port = mpadd->u.v4.port;
++
++ mptcp_v4_add_raddress(tcp_sk(sk)->mpcb, &mpadd->u.v4.addr, port,
++ mpadd->addr_id);
++#if IS_ENABLED(CONFIG_IPV6)
++ } else if (mpadd->ipver == 6) {
++ __be16 port = 0;
++ if (mpadd->len == MPTCP_SUB_LEN_ADD_ADDR6 + 2)
++ port = mpadd->u.v6.port;
++
++ mptcp_v6_add_raddress(tcp_sk(sk)->mpcb, &mpadd->u.v6.addr, port,
++ mpadd->addr_id);
++#endif /* CONFIG_IPV6 */
++ }
++}
++
++static void mptcp_handle_rem_addr(const unsigned char *ptr, struct sock *sk)
++{
++ struct mp_remove_addr *mprem = (struct mp_remove_addr *)ptr;
++ int i;
++ u8 rem_id;
++
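++ /* Walk over all address-ids carried in the REMOVE_ADDR option */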
++ for (i = 0; i <= mprem->len - MPTCP_SUB_LEN_REMOVE_ADDR; i++) {
++ rem_id = (&mprem->addrs_id)[i];
++ if (!mptcp_rem_raddress(tcp_sk(sk)->mpcb, rem_id))
++ mptcp_send_reset_rem_id(tcp_sk(sk)->mpcb, rem_id);
++ }
++}
++
++static void mptcp_parse_addropt(const struct sk_buff *skb, struct sock *sk)
++{
++ struct tcphdr *th = tcp_hdr(skb);
++ unsigned char *ptr;
++ int length = (th->doff * 4) - sizeof(struct tcphdr);
++
++ /* Jump through the options to check whether ADD_ADDR is there */
++ ptr = (unsigned char *)(th + 1);
++ while (length > 0) {
++ int opcode = *ptr++;
++ int opsize;
++
++ switch (opcode) {
++ case TCPOPT_EOL:
++ return;
++ case TCPOPT_NOP:
++ length--;
++ continue;
++ default:
++ opsize = *ptr++;
++ if (opsize < 2)
++ return;
++ if (opsize > length)
++ return; /* don't parse partial options */
++ if (opcode == TCPOPT_MPTCP &&
++ ((struct mptcp_option *)ptr)->sub == MPTCP_SUB_ADD_ADDR) {
++#if IS_ENABLED(CONFIG_IPV6)
++ struct mp_add_addr *mpadd = (struct mp_add_addr *)ptr;
++ if ((mpadd->ipver == 4 && opsize != MPTCP_SUB_LEN_ADD_ADDR4 &&
++ opsize != MPTCP_SUB_LEN_ADD_ADDR4 + 2) ||
++ (mpadd->ipver == 6 && opsize != MPTCP_SUB_LEN_ADD_ADDR6 &&
++ opsize != MPTCP_SUB_LEN_ADD_ADDR6 + 2))
++#else
++ if (opsize != MPTCP_SUB_LEN_ADD_ADDR4 &&
++ opsize != MPTCP_SUB_LEN_ADD_ADDR4 + 2)
++#endif /* CONFIG_IPV6 */
++ goto cont;
++
++ mptcp_handle_add_addr(ptr, sk);
++ }
++ if (opcode == TCPOPT_MPTCP &&
++ ((struct mptcp_option *)ptr)->sub == MPTCP_SUB_REMOVE_ADDR) {
++ if ((opsize - MPTCP_SUB_LEN_REMOVE_ADDR) < 0)
++ goto cont;
++
++ mptcp_handle_rem_addr(ptr, sk);
++ }
++cont:
++ ptr += opsize - 2;
++ length -= opsize;
++ }
++ }
++ return;
++}
++
++static inline int mptcp_mp_fail_rcvd(struct sock *sk, const struct tcphdr *th)
++{
++ struct mptcp_tcp_sock *mptcp = tcp_sk(sk)->mptcp;
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct mptcp_cb *mpcb = tcp_sk(sk)->mpcb;
++
++ if (unlikely(mptcp->rx_opt.mp_fail)) {
++ mptcp->rx_opt.mp_fail = 0;
++
++ if (!th->rst && !mpcb->infinite_mapping_snd) {
++ struct sock *sk_it;
++
++ mpcb->send_infinite_mapping = 1;
++ /* We resend everything that has not been acknowledged */
++ meta_sk->sk_send_head = tcp_write_queue_head(meta_sk);
++
++ /* We artificially restart the whole send-queue. Thus,
++ * it is as if no packets are in flight
++ */
++ tcp_sk(meta_sk)->packets_out = 0;
++
++ /* If the snd_nxt already wrapped around, we have to
++ * undo the wrapping, as we are restarting from snd_una
++ * on.
++ */
++ if (tcp_sk(meta_sk)->snd_nxt < tcp_sk(meta_sk)->snd_una) {
++ mpcb->snd_high_order[mpcb->snd_hiseq_index] -= 2;
++ mpcb->snd_hiseq_index = mpcb->snd_hiseq_index ? 0 : 1;
++ }
++ tcp_sk(meta_sk)->snd_nxt = tcp_sk(meta_sk)->snd_una;
++
++ /* Trigger a sending on the meta. */
++ mptcp_push_pending_frames(meta_sk);
++
++ mptcp_for_each_sk(mpcb, sk_it) {
++ if (sk != sk_it)
++ mptcp_sub_force_close(sk_it);
++ }
++ }
++
++ return 0;
++ }
++
++ if (unlikely(mptcp->rx_opt.mp_fclose)) {
++ struct sock *sk_it, *tmpsk;
++
++ mptcp->rx_opt.mp_fclose = 0;
++ if (mptcp->rx_opt.mptcp_key != mpcb->mptcp_loc_key)
++ return 0;
++
++ if (tcp_need_reset(sk->sk_state))
++ tcp_send_active_reset(sk, GFP_ATOMIC);
++
++ mptcp_for_each_sk_safe(mpcb, sk_it, tmpsk)
++ mptcp_sub_force_close(sk_it);
++
++ tcp_reset(meta_sk);
++
++ return 1;
++ }
++
++ return 0;
++}
++
++static inline void mptcp_path_array_check(struct sock *meta_sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++
++ if (unlikely(mpcb->list_rcvd)) {
++ mpcb->list_rcvd = 0;
++ if (mpcb->pm_ops->new_remote_address)
++ mpcb->pm_ops->new_remote_address(meta_sk);
++ }
++}
++
++int mptcp_handle_options(struct sock *sk, const struct tcphdr *th, struct sk_buff *skb)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_options_received *mopt = &tp->mptcp->rx_opt;
++
++ if (tp->mpcb->infinite_mapping_rcv || tp->mpcb->infinite_mapping_snd)
++ return 0;
++
++ if (mptcp_mp_fail_rcvd(sk, th))
++ return 1;
++
++ /* RFC 6824, Section 3.3:
++ * If a checksum is not present when its use has been negotiated, the
++ * receiver MUST close the subflow with a RST as it is considered broken.
++ */
++ if (mptcp_is_data_seq(skb) && tp->mpcb->dss_csum &&
++ !(TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_DSS_CSUM)) {
++ if (tcp_need_reset(sk->sk_state))
++ tcp_send_active_reset(sk, GFP_ATOMIC);
++
++ mptcp_sub_force_close(sk);
++ return 1;
++ }
++
++ /* We have to acknowledge retransmissions of the third
++ * ack.
++ */
++ if (mopt->join_ack) {
++ tcp_send_delayed_ack(sk);
++ mopt->join_ack = 0;
++ }
++
++ if (mopt->saw_add_addr || mopt->saw_rem_addr) {
++ if (mopt->more_add_addr || mopt->more_rem_addr) {
++ mptcp_parse_addropt(skb, sk);
++ } else {
++ if (mopt->saw_add_addr)
++ mptcp_handle_add_addr(mopt->add_addr_ptr, sk);
++ if (mopt->saw_rem_addr)
++ mptcp_handle_rem_addr(mopt->rem_addr_ptr, sk);
++ }
++
++ mopt->more_add_addr = 0;
++ mopt->saw_add_addr = 0;
++ mopt->more_rem_addr = 0;
++ mopt->saw_rem_addr = 0;
++ }
++ if (mopt->saw_low_prio) {
++ if (mopt->saw_low_prio == 1) {
++ tp->mptcp->rcv_low_prio = mopt->low_prio;
++ } else {
++ struct sock *sk_it;
++ mptcp_for_each_sk(tp->mpcb, sk_it) {
++ struct mptcp_tcp_sock *mptcp = tcp_sk(sk_it)->mptcp;
++ if (mptcp->rem_id == mopt->prio_addr_id)
++ mptcp->rcv_low_prio = mopt->low_prio;
++ }
++ }
++ mopt->saw_low_prio = 0;
++ }
++
++ mptcp_data_ack(sk, skb);
++
++ mptcp_path_array_check(mptcp_meta_sk(sk));
++ /* Socket may have been mp_killed by a REMOVE_ADDR */
++ if (tp->mp_killed)
++ return 1;
++
++ return 0;
++}
++
++/* The skptr is needed, because if we become MPTCP-capable, we have to switch
++ * from meta-socket to master-socket.
++ *
++ * @return: 1 - we want to reset this connection
++ * 2 - we want to discard the received syn/ack
++ * 0 - everything is fine - continue
++ */
++int mptcp_rcv_synsent_state_process(struct sock *sk, struct sock **skptr,
++ struct sk_buff *skb,
++ struct mptcp_options_received *mopt)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ if (tp->mpc) {
++ u8 hash_mac_check[20];
++ struct mptcp_cb *mpcb = tp->mpcb;
++
++ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_rem_key,
++ (u8 *)&mpcb->mptcp_loc_key,
++ (u8 *)&tp->mptcp->rx_opt.mptcp_recv_nonce,
++ (u8 *)&tp->mptcp->mptcp_loc_nonce,
++ (u32 *)hash_mac_check);
++ if (memcmp(hash_mac_check,
++ (char *)&tp->mptcp->rx_opt.mptcp_recv_tmac, 8)) {
++ mptcp_sub_force_close(sk);
++ return 1;
++ }
++
++ /* Set this flag in order to postpone data sending
++ * until the 4th ack arrives.
++ */
++ tp->mptcp->pre_established = 1;
++ tp->mptcp->rcv_low_prio = tp->mptcp->rx_opt.low_prio;
++
++ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_loc_key,
++ (u8 *)&mpcb->mptcp_rem_key,
++ (u8 *)&tp->mptcp->mptcp_loc_nonce,
++ (u8 *)&tp->mptcp->rx_opt.mptcp_recv_nonce,
++ (u32 *)&tp->mptcp->sender_mac[0]);
++
++ } else if (mopt->saw_mpc) {
++ if (mptcp_create_master_sk(sk, mopt->mptcp_key,
++ ntohs(tcp_hdr(skb)->window)))
++ return 2;
++
++ sk = tcp_sk(sk)->mpcb->master_sk;
++ *skptr = sk;
++ tp = tcp_sk(sk);
++
++ /* snd_nxt - 1, because it has been incremented
++ * by tcp_connect for the SYN
++ */
++ tp->mptcp->snt_isn = tp->snd_nxt - 1;
++ tp->mpcb->dss_csum = mopt->dss_csum;
++ tp->mptcp->include_mpc = 1;
++
++ sk_set_socket(sk, mptcp_meta_sk(sk)->sk_socket);
++ sk->sk_wq = mptcp_meta_sk(sk)->sk_wq;
++
++ mptcp_update_metasocket(sk, mptcp_meta_sk(sk));
++
++ /* hold in mptcp_inherit_sk due to initialization to 2 */
++ sock_put(sk);
++ } else {
++ tp->request_mptcp = 0;
++
++ if (tp->inside_tk_table)
++ mptcp_hash_remove(tp);
++ }
++
++ if (tp->mpc)
++ tp->mptcp->rcv_isn = TCP_SKB_CB(skb)->seq;
++
++ return 0;
++}
++
++bool mptcp_should_expand_sndbuf(const struct sock *sk)
++{
++ struct sock *sk_it;
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ int cnt_backups = 0;
++ int backup_available = 0;
++
++ /* We circumvent this check in tcp_check_space, because we want to
++ * always call sk_write_space. So, we reproduce the check here.
++ */
++ if (!meta_sk->sk_socket ||
++ !test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags))
++ return false;
++
++ /* If the user specified a specific send buffer setting, do
++ * not modify it.
++ */
++ if (meta_sk->sk_userlocks & SOCK_SNDBUF_LOCK)
++ return false;
++
++ /* If we are under global TCP memory pressure, do not expand. */
++ if (sk_under_memory_pressure(meta_sk))
++ return false;
++
++ /* If we are under soft global TCP memory pressure, do not expand. */
++ if (sk_memory_allocated(meta_sk) >= sk_prot_mem_limits(meta_sk, 0))
++ return false;
++
++
++ /* For MPTCP we look for a subsocket that could send data.
++ * If we find one, we update the send-buffer.
++ */
++ mptcp_for_each_sk(meta_tp->mpcb, sk_it) {
++ struct tcp_sock *tp_it = tcp_sk(sk_it);
++
++ if (!mptcp_sk_can_send(sk_it))
++ continue;
++
++ /* Backup-flows have to be counted - if there is no other
++ * subflow we take the backup-flow into account. */
++ if (tp_it->mptcp->rcv_low_prio || tp_it->mptcp->low_prio) {
++ cnt_backups++;
++ }
++
++ if (tp_it->packets_out < tp_it->snd_cwnd) {
++ if (tp_it->mptcp->rcv_low_prio || tp_it->mptcp->low_prio) {
++ backup_available = 1;
++ continue;
++ }
++ return true;
++ }
++ }
++
++ /* Backup-flow is available for sending - update send-buffer */
++ if (meta_tp->mpcb->cnt_established == cnt_backups && backup_available)
++ return true;
++ return false;
++}
++
++void mptcp_init_buffer_space(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ int space;
++
++ tcp_init_buffer_space(sk);
++
++ if (is_master_tp(tp)) {
++ /* If there is only one subflow, we just use regular TCP
++ * autotuning. User-locks are handled already by
++ * tcp_init_buffer_space
++ */
++ meta_tp->window_clamp = tp->window_clamp;
++ meta_tp->rcv_ssthresh = tp->rcv_ssthresh;
++ meta_sk->sk_rcvbuf = sk->sk_rcvbuf;
++ meta_sk->sk_sndbuf = sk->sk_sndbuf;
++
++ return;
++ }
++
++ if (meta_sk->sk_userlocks & SOCK_RCVBUF_LOCK)
++ goto snd_buf;
++
++ /* Adding a new subflow to the rcv-buffer space. We make a simple
++ * addition, to give some space to allow traffic on the new subflow.
++ * Autotuning will increase it further later on.
++ */
++ space = min(meta_sk->sk_rcvbuf + sk->sk_rcvbuf, sysctl_tcp_rmem[2]);
++ if (space > meta_sk->sk_rcvbuf) {
++ meta_tp->window_clamp += tp->window_clamp;
++ meta_tp->rcv_ssthresh += tp->rcv_ssthresh;
++ meta_sk->sk_rcvbuf = space;
++ }
++
++snd_buf:
++ if (meta_sk->sk_userlocks & SOCK_SNDBUF_LOCK)
++ return;
++
++ /* Adding a new subflow to the send-buffer space. We make a simple
++ * addition, to give some space to allow traffic on the new subflow.
++ * Autotuning will increase it further later on.
++ */
++ space = min(meta_sk->sk_sndbuf + sk->sk_sndbuf, sysctl_tcp_wmem[2]);
++ if (space > meta_sk->sk_sndbuf) {
++ meta_sk->sk_sndbuf = space;
++ meta_sk->sk_write_space(meta_sk);
++ }
++}
++
++void mptcp_tcp_set_rto(struct sock *sk)
++{
++ tcp_set_rto(sk);
++ mptcp_set_rto(sk);
++}
+diff --git a/net/mptcp/mptcp_ipv4.c b/net/mptcp/mptcp_ipv4.c
+new file mode 100644
+index 0000000..b6053f1
+--- /dev/null
++++ b/net/mptcp/mptcp_ipv4.c
+@@ -0,0 +1,603 @@
++/*
++ * MPTCP implementation - IPv4-specific functions
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/export.h>
++#include <linux/ip.h>
++#include <linux/list.h>
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <linux/tcp.h>
++
++#include <net/inet_common.h>
++#include <net/inet_connection_sock.h>
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++#include <net/request_sock.h>
++#include <net/tcp.h>
++
++u32 mptcp_v4_get_nonce(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
++ u32 seq)
++{
++ u32 hash[MD5_DIGEST_WORDS];
++
++ hash[0] = (__force u32)saddr;
++ hash[1] = (__force u32)daddr;
++ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
++ hash[3] = seq;
++
++ md5_transform(hash, mptcp_secret);
++
++ return hash[0];
++}
++
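++/* Derive a 64-bit local key from the 4-tuple, a running seed and the
++ * mptcp_secret.
++ */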
++u64 mptcp_v4_get_key(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport)
++{
++ u32 hash[MD5_DIGEST_WORDS];
++
++ hash[0] = (__force u32)saddr;
++ hash[1] = (__force u32)daddr;
++ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
++ hash[3] = mptcp_key_seed++;
++
++ md5_transform(hash, mptcp_secret);
++
++ return *((u64 *)hash);
++}
++
++
++static void mptcp_v4_reqsk_destructor(struct request_sock *req)
++{
++ mptcp_reqsk_destructor(req);
++
++ tcp_v4_reqsk_destructor(req);
++}
++
++/* Similar to tcp_request_sock_ops */
++struct request_sock_ops mptcp_request_sock_ops __read_mostly = {
++ .family = PF_INET,
++ .obj_size = sizeof(struct mptcp_request_sock),
++ .rtx_syn_ack = tcp_v4_rtx_synack,
++ .send_ack = tcp_v4_reqsk_send_ack,
++ .destructor = mptcp_v4_reqsk_destructor,
++ .send_reset = tcp_v4_send_reset,
++ .syn_ack_timeout = tcp_syn_ack_timeout,
++};
++
++static void mptcp_v4_reqsk_queue_hash_add(struct sock *meta_sk,
++ struct request_sock *req,
++ unsigned long timeout)
++{
++ const u32 h1 = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
++ inet_rsk(req)->ir_rmt_port,
++ 0, MPTCP_HASH_SIZE);
++ /* We cannot call inet_csk_reqsk_queue_hash_add(), because we do not
++ * want to reset the keepalive-timer (responsible for retransmitting
++ * SYN/ACKs). We do not retransmit SYN/ACKs+MP_JOINs, because we cannot
++ * overload the keepalive timer. Also, it's not a big deal, because the
++ * third ACK of the MP_JOIN-handshake is sent in a reliable manner. So,
++ * if the third ACK gets lost, the client will handle the retransmission
++ * anyway. If our SYN/ACK gets lost, the client will retransmit the
++ * SYN.
++ */
++ struct inet_connection_sock *meta_icsk = inet_csk(meta_sk);
++ struct listen_sock *lopt = meta_icsk->icsk_accept_queue.listen_opt;
++ const u32 h2 = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
++ inet_rsk(req)->ir_rmt_port,
++ lopt->hash_rnd, lopt->nr_table_entries);
++
++ reqsk_queue_hash_req(&meta_icsk->icsk_accept_queue, h2, req, timeout);
++ reqsk_queue_added(&meta_icsk->icsk_accept_queue);
++
++ spin_lock(&mptcp_reqsk_hlock);
++ list_add(&mptcp_rsk(req)->collide_tuple, &mptcp_reqsk_htb[h1]);
++ spin_unlock(&mptcp_reqsk_hlock);
++}
++
++/* Similar to tcp_v4_conn_request */
++static void mptcp_v4_join_request(struct sock *meta_sk, struct sk_buff *skb)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct tcp_options_received tmp_opt;
++ struct mptcp_options_received mopt;
++ struct request_sock *req;
++ struct inet_request_sock *ireq;
++ struct mptcp_request_sock *mtreq;
++ struct dst_entry *dst = NULL;
++ u8 mptcp_hash_mac[20];
++ __be32 saddr = ip_hdr(skb)->saddr;
++ __be32 daddr = ip_hdr(skb)->daddr;
++ __u32 isn = TCP_SKB_CB(skb)->when;
++ int want_cookie = 0;
++ union inet_addr addr;
++
++ tcp_clear_options(&tmp_opt);
++ mptcp_init_mp_opt(&mopt);
++ tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
++ tmp_opt.user_mss = tcp_sk(meta_sk)->rx_opt.user_mss;
++ tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL);
++
++ req = inet_reqsk_alloc(&mptcp_request_sock_ops);
++ if (!req)
++ return;
++
++#ifdef CONFIG_TCP_MD5SIG
++ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
++#endif
++
++ tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
++ tcp_openreq_init(req, &tmp_opt, skb);
++
++ ireq = inet_rsk(req);
++ ireq->ir_loc_addr = daddr;
++ ireq->ir_rmt_addr = saddr;
++ ireq->no_srccheck = inet_sk(meta_sk)->transparent;
++ ireq->opt = tcp_v4_save_options(skb);
++
++ if (security_inet_conn_request(meta_sk, skb, req))
++ goto drop_and_free;
++
++ if (!want_cookie || tmp_opt.tstamp_ok)
++ TCP_ECN_create_request(req, skb, sock_net(meta_sk));
++
++ if (!isn) {
++ struct flowi4 fl4;
++
++ /* VJ's idea. We save last timestamp seen
++ * from the destination in peer table, when entering
++ * state TIME-WAIT, and check against it before
++ * accepting new connection request.
++ *
++ * If "isn" is not zero, this request hit alive
++ * timewait bucket, so that all the necessary checks
++ * are made in the function processing timewait state.
++ */
++ if (tmp_opt.saw_tstamp &&
++ tcp_death_row.sysctl_tw_recycle &&
++ (dst = inet_csk_route_req(meta_sk, &fl4, req)) != NULL &&
++ fl4.daddr == saddr) {
++ if (!tcp_peer_is_proven(req, dst, true)) {
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_PAWSPASSIVEREJECTED);
++ goto drop_and_release;
++ }
++ }
++ /* Kill the following clause, if you dislike this way. */
++ else if (!sysctl_tcp_syncookies &&
++ (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(meta_sk) <
++ (sysctl_max_syn_backlog >> 2)) &&
++ !tcp_peer_is_proven(req, dst, false)) {
++ /* Without syncookies last quarter of
++ * backlog is filled with destinations,
++ * proven to be alive.
++ * It means that we continue to communicate
++ * to destinations, already remembered
++ * to the moment of synflood.
++ */
++ LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
++ &saddr, ntohs(tcp_hdr(skb)->source));
++ goto drop_and_release;
++ }
++
++ isn = tcp_v4_init_sequence(skb);
++ }
++ tcp_rsk(req)->snt_isn = isn;
++ tcp_rsk(req)->snt_synack = tcp_time_stamp;
++ tcp_rsk(req)->listener = NULL;
++
++ mtreq = mptcp_rsk(req);
++ mtreq->mpcb = mpcb;
++ INIT_LIST_HEAD(&mtreq->collide_tuple);
++ mtreq->mptcp_rem_nonce = mopt.mptcp_recv_nonce;
++ mtreq->mptcp_rem_key = mpcb->mptcp_rem_key;
++ mtreq->mptcp_loc_key = mpcb->mptcp_loc_key;
++ mtreq->mptcp_loc_nonce = mptcp_v4_get_nonce(saddr, daddr,
++ tcp_hdr(skb)->source,
++ tcp_hdr(skb)->dest, isn);
++ mptcp_hmac_sha1((u8 *)&mtreq->mptcp_loc_key,
++ (u8 *)&mtreq->mptcp_rem_key,
++ (u8 *)&mtreq->mptcp_loc_nonce,
++ (u8 *)&mtreq->mptcp_rem_nonce, (u32 *)mptcp_hash_mac);
++ mtreq->mptcp_hash_tmac = *(u64 *)mptcp_hash_mac;
++
++ addr.ip = ireq->ir_loc_addr;
++ mtreq->loc_id = mpcb->pm_ops->get_local_id(AF_INET, &addr, sock_net(meta_sk));
++ if (mtreq->loc_id == -1) /* Address not part of the allowed ones */
++ goto drop_and_release;
++ mtreq->rem_id = mopt.rem_id;
++ mtreq->low_prio = mopt.low_prio;
++ tcp_rsk(req)->saw_mpc = 1;
++
++ if (tcp_v4_send_synack(meta_sk, dst, req, skb_get_queue_mapping(skb)))
++ goto drop_and_free;
++
++ /* Adding to request queue in metasocket */
++ mptcp_v4_reqsk_queue_hash_add(meta_sk, req, TCP_TIMEOUT_INIT);
++
++ return;
++
++drop_and_release:
++ dst_release(dst);
++drop_and_free:
++ reqsk_free(req);
++ return;
++}
++
++int mptcp_v4_rem_raddress(struct mptcp_cb *mpcb, u8 id)
++{
++ int i;
++
++ for (i = 0; i < MPTCP_MAX_ADDR; i++) {
++ if (!((1 << i) & mpcb->rem4_bits))
++ continue;
++
++ if (mpcb->remaddr4[i].rem4_id == id) {
++ /* remove address from bitfield */
++ mpcb->rem4_bits &= ~(1 << i);
++
++ return 0;
++ }
++ }
++
++ return -1;
++}
++
++/* Based on function tcp_v4_conn_request (tcp_ipv4.c).
++ * Returns -1 if there is no more space to store an additional
++ * address.
++ */
++int mptcp_v4_add_raddress(struct mptcp_cb *mpcb, const struct in_addr *addr,
++ __be16 port, u8 id)
++{
++ int i;
++ struct mptcp_rem4 *rem4;
++
++ mptcp_for_each_bit_set(mpcb->rem4_bits, i) {
++ rem4 = &mpcb->remaddr4[i];
++
++ /* Address is already in the list --- continue */
++ if (rem4->rem4_id == id &&
++ rem4->addr.s_addr == addr->s_addr && rem4->port == port)
++ return 0;
++
++ /* This may be the case when the peer is behind a NAT. It is
++ * trying to JOIN, thus sending the JOIN with a certain ID.
++ * However, the src_addr of the IP packet has been changed. We
++ * update the addr in the list, because this is the address as
++ * OUR BOX sees it.
++ */
++ if (rem4->rem4_id == id && rem4->addr.s_addr != addr->s_addr) {
++ /* update the address */
++ mptcp_debug("%s: updating old addr:%pI4 to addr %pI4 with id:%d\n",
++ __func__, &rem4->addr.s_addr,
++ &addr->s_addr, id);
++ rem4->addr.s_addr = addr->s_addr;
++ rem4->port = port;
++ mpcb->list_rcvd = 1;
++ return 0;
++ }
++ }
++
++ i = mptcp_find_free_index(mpcb->rem4_bits);
++ /* Do we have already the maximum number of local/remote addresses? */
++ if (i < 0) {
++ mptcp_debug("%s: At max num of remote addresses: %d --- not adding address: %pI4\n",
++ __func__, MPTCP_MAX_ADDR, &addr->s_addr);
++ return -1;
++ }
++
++ rem4 = &mpcb->remaddr4[i];
++
++ /* Address is not known yet, store it */
++ rem4->addr.s_addr = addr->s_addr;
++ rem4->port = port;
++ rem4->bitfield = 0;
++ rem4->retry_bitfield = 0;
++ rem4->rem4_id = id;
++ mpcb->list_rcvd = 1;
++ mpcb->rem4_bits |= (1 << i);
++
++ return 0;
++}
++
++/* Sets the bitfield of the remote-address field.
++ * The local address is not set, as it will disappear with the global address-list.
++ */
++void mptcp_v4_set_init_addr_bit(struct mptcp_cb *mpcb, __be32 daddr, int index)
++{
++ int i;
++
++ mptcp_for_each_bit_set(mpcb->rem4_bits, i) {
++ if (mpcb->remaddr4[i].addr.s_addr == daddr) {
++ mpcb->remaddr4[i].bitfield |= (1 << index);
++ return;
++ }
++ }
++}
++
++/* We only process join requests here. (either the SYN or the final ACK) */
++int mptcp_v4_do_rcv(struct sock *meta_sk, struct sk_buff *skb)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct sock *child, *rsk = NULL;
++ int ret;
++
++ if (!(TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_JOIN)) {
++ struct tcphdr *th = tcp_hdr(skb);
++ const struct iphdr *iph = ip_hdr(skb);
++ struct sock *sk;
++
++ sk = inet_lookup_established(sock_net(meta_sk), &tcp_hashinfo,
++ iph->saddr, th->source, iph->daddr,
++ th->dest, inet_iif(skb));
++
++ if (!sk) {
++ kfree_skb(skb);
++ return 0;
++ }
++ if (is_meta_sk(sk)) {
++ WARN("%s Did not find a sub-sk - did found the meta!\n", __func__);
++ kfree_skb(skb);
++ sock_put(sk);
++ return 0;
++ }
++
++ if (sk->sk_state == TCP_TIME_WAIT) {
++ inet_twsk_put(inet_twsk(sk));
++ kfree_skb(skb);
++ return 0;
++ }
++
++ ret = tcp_v4_do_rcv(sk, skb);
++ sock_put(sk);
++
++ return ret;
++ }
++ TCP_SKB_CB(skb)->mptcp_flags = 0;
++
++ /* Has been removed from the tk-table. Thus, no new subflows.
++ *
++ * The check for the close-state is necessary, because we may have been
++ * closed without going through mptcp_close().
++ *
++ * When falling back, no new subflows are allowed either.
++ */
++ if (meta_sk->sk_state == TCP_CLOSE || !tcp_sk(meta_sk)->inside_tk_table ||
++ mpcb->infinite_mapping_rcv || mpcb->send_infinite_mapping)
++ goto reset_and_discard;
++
++ child = tcp_v4_hnd_req(meta_sk, skb);
++
++ if (!child)
++ goto discard;
++
++ if (child != meta_sk) {
++ sock_rps_save_rxhash(child, skb);
++ /* We don't call tcp_child_process here, because we hold
++ * already the meta-sk-lock and are sure that it is not owned
++ * by the user.
++ */
++ ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb), skb->len);
++ bh_unlock_sock(child);
++ sock_put(child);
++ if (ret) {
++ rsk = child;
++ goto reset_and_discard;
++ }
++ } else {
++ if (tcp_hdr(skb)->syn) {
++ struct mp_join *join_opt = mptcp_find_join(skb);
++ /* Currently we make two calls to mptcp_find_join(). This
++ * can probably be optimized.
++ */
++ if (mptcp_v4_add_raddress(mpcb,
++ (struct in_addr *)&ip_hdr(skb)->saddr,
++ 0,
++ join_opt->addr_id) < 0)
++ goto reset_and_discard;
++ mpcb->list_rcvd = 0;
++
++ mptcp_v4_join_request(meta_sk, skb);
++ goto discard;
++ }
++ goto reset_and_discard;
++ }
++ return 0;
++
++reset_and_discard:
++ tcp_v4_send_reset(rsk, skb);
++discard:
++ kfree_skb(skb);
++ return 0;
++}
++
++/* After this, the ref count of the meta_sk associated with the request_sock
++ * is incremented. Thus it is the responsibility of the caller
++ * to call sock_put() when the reference is not needed anymore.
++ */
++struct sock *mptcp_v4_search_req(const __be16 rport, const __be32 raddr,
++ const __be32 laddr, const struct net *net)
++{
++ struct mptcp_request_sock *mtreq;
++ struct sock *meta_sk = NULL;
++
++ spin_lock(&mptcp_reqsk_hlock);
++ list_for_each_entry(mtreq,
++ &mptcp_reqsk_htb[inet_synq_hash(raddr, rport, 0,
++ MPTCP_HASH_SIZE)],
++ collide_tuple) {
++ struct inet_request_sock *ireq = inet_rsk(rev_mptcp_rsk(mtreq));
++ meta_sk = mtreq->mpcb->meta_sk;
++
++ if (ireq->ir_rmt_port == rport &&
++ ireq->ir_rmt_addr == raddr &&
++ ireq->ir_loc_addr == laddr &&
++ rev_mptcp_rsk(mtreq)->rsk_ops->family == AF_INET &&
++ net_eq(net, sock_net(meta_sk)))
++ break;
++ meta_sk = NULL;
++ }
++
++ if (meta_sk && unlikely(!atomic_inc_not_zero(&meta_sk->sk_refcnt)))
++ meta_sk = NULL;
++ spin_unlock(&mptcp_reqsk_hlock);
++
++ return meta_sk;
++}
++
++/* Create a new IPv4 subflow.
++ *
++ * We are in user-context and the meta-sock-lock is held.
++ */
++int mptcp_init4_subsockets(struct sock *meta_sk, const struct mptcp_loc4 *loc,
++ struct mptcp_rem4 *rem)
++{
++ struct tcp_sock *tp;
++ struct sock *sk;
++ struct sockaddr_in loc_in, rem_in;
++ struct socket sock;
++ int ulid_size = 0, ret;
++
++ /** First, create and prepare the new socket */
++
++ sock.type = meta_sk->sk_socket->type;
++ sock.state = SS_UNCONNECTED;
++ sock.wq = meta_sk->sk_socket->wq;
++ sock.file = meta_sk->sk_socket->file;
++ sock.ops = NULL;
++
++ ret = inet_create(sock_net(meta_sk), &sock, IPPROTO_TCP, 1);
++ if (unlikely(ret < 0)) {
++ mptcp_debug("%s inet_create failed ret: %d\n", __func__, ret);
++ return ret;
++ }
++
++ sk = sock.sk;
++ tp = tcp_sk(sk);
++
++ /* All subsockets need the MPTCP-lock-class */
++ lockdep_set_class_and_name(&(sk)->sk_lock.slock, &meta_slock_key, "slock-AF_INET-MPTCP");
++ lockdep_init_map(&(sk)->sk_lock.dep_map, "sk_lock-AF_INET-MPTCP", &meta_key, 0);
++
++ if (mptcp_add_sock(meta_sk, sk, loc->loc4_id, rem->rem4_id, GFP_KERNEL))
++ goto error;
++
++ tp->mptcp->slave_sk = 1;
++ tp->mptcp->low_prio = loc->low_prio;
++
++ /* Initializing the timer for an MPTCP subflow */
++ setup_timer(&tp->mptcp->mptcp_ack_timer, mptcp_ack_handler, (unsigned long)sk);
++
++ /** Then, connect the socket to the peer */
++
++ ulid_size = sizeof(struct sockaddr_in);
++ loc_in.sin_family = AF_INET;
++ rem_in.sin_family = AF_INET;
++ loc_in.sin_port = 0;
++ if (rem->port)
++ rem_in.sin_port = rem->port;
++ else
++ rem_in.sin_port = inet_sk(meta_sk)->inet_dport;
++ loc_in.sin_addr = loc->addr;
++ rem_in.sin_addr = rem->addr;
++
++ ret = sock.ops->bind(&sock, (struct sockaddr *)&loc_in, ulid_size);
++ if (ret < 0) {
++ mptcp_debug("%s: MPTCP subsocket bind() failed, error %d\n",
++ __func__, ret);
++ goto error;
++ }
++
++ mptcp_debug("%s: token %#x pi %d src_addr:%pI4:%d dst_addr:%pI4:%d\n",
++ __func__, tcp_sk(meta_sk)->mpcb->mptcp_loc_token,
++ tp->mptcp->path_index, &loc_in.sin_addr,
++ ntohs(loc_in.sin_port), &rem_in.sin_addr,
++ ntohs(rem_in.sin_port));
++
++ ret = sock.ops->connect(&sock, (struct sockaddr *)&rem_in,
++ ulid_size, O_NONBLOCK);
++ if (ret < 0 && ret != -EINPROGRESS) {
++ mptcp_debug("%s: MPTCP subsocket connect() failed, error %d\n",
++ __func__, ret);
++ goto error;
++ }
++
++ sk_set_socket(sk, meta_sk->sk_socket);
++ sk->sk_wq = meta_sk->sk_wq;
++
++ return 0;
++
++error:
++ /* May happen if mptcp_add_sock fails first */
++ if (!tp->mpc) {
++ tcp_close(sk, 0);
++ } else {
++ local_bh_disable();
++ mptcp_sub_force_close(sk);
++ local_bh_enable();
++ }
++ return ret;
++}
++EXPORT_SYMBOL(mptcp_init4_subsockets);
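
At its core, mptcp_init4_subsockets() opens an extra flow much the way an application would: create a TCP socket, bind() it with port 0 so the stack picks an ephemeral source port, then connect() non-blocking and accept EINPROGRESS as the normal outcome. A rough userspace sketch of that sequence follows; it only illustrates the pattern (the patch itself uses in-kernel sockets created with inet_create() and attached to the meta-socket), and all names below are local to the example.

#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

/* Open a non-blocking TCP connection from loc to rem:rport; returns fd or -1. */
static int open_subflow(struct in_addr loc, struct in_addr rem, uint16_t rport)
{
	struct sockaddr_in loc_in = { 0 }, rem_in = { 0 };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	loc_in.sin_family = AF_INET;
	loc_in.sin_port = 0;		/* let the stack pick a source port */
	loc_in.sin_addr = loc;
	if (bind(fd, (struct sockaddr *)&loc_in, sizeof(loc_in)) < 0)
		goto error;

	fcntl(fd, F_SETFL, O_NONBLOCK);

	rem_in.sin_family = AF_INET;
	rem_in.sin_port = htons(rport);
	rem_in.sin_addr = rem;
	if (connect(fd, (struct sockaddr *)&rem_in, sizeof(rem_in)) < 0 &&
	    errno != EINPROGRESS)
		goto error;		/* EINPROGRESS is the expected result */

	return fd;
error:
	close(fd);
	return -1;
}
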
++
++/* General initialization of IPv4 for MPTCP */
++int mptcp_pm_v4_init(void)
++{
++ int ret = 0;
++ struct request_sock_ops *ops = &mptcp_request_sock_ops;
++
++ ops->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", "MPTCP");
++ if (ops->slab_name == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ops->slab = kmem_cache_create(ops->slab_name, ops->obj_size, 0,
++ SLAB_DESTROY_BY_RCU|SLAB_HWCACHE_ALIGN,
++ NULL);
++
++ if (ops->slab == NULL) {
++ ret = -ENOMEM;
++ goto err_reqsk_create;
++ }
++
++out:
++ return ret;
++
++err_reqsk_create:
++ kfree(ops->slab_name);
++ ops->slab_name = NULL;
++ goto out;
++}
++
++void mptcp_pm_v4_undo(void)
++{
++ kmem_cache_destroy(mptcp_request_sock_ops.slab);
++ kfree(mptcp_request_sock_ops.slab_name);
++}
++
++
+diff --git a/net/mptcp/mptcp_ipv6.c b/net/mptcp/mptcp_ipv6.c
+new file mode 100644
+index 0000000..b6b444d
+--- /dev/null
++++ b/net/mptcp/mptcp_ipv6.c
+@@ -0,0 +1,822 @@
++/*
++ * MPTCP implementation - IPv6-specific functions
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/export.h>
++#include <linux/in6.h>
++#include <linux/kernel.h>
++
++#include <net/addrconf.h>
++#include <net/flow.h>
++#include <net/inet6_connection_sock.h>
++#include <net/inet6_hashtables.h>
++#include <net/inet_common.h>
++#include <net/ipv6.h>
++#include <net/ip6_checksum.h>
++#include <net/ip6_route.h>
++#include <net/mptcp.h>
++#include <net/mptcp_v6.h>
++#include <net/tcp.h>
++#include <net/transp_v6.h>
++
++static int mptcp_v6v4_send_synack(struct sock *meta_sk, struct request_sock *req,
++ u16 queue_mapping);
++
++__u32 mptcp_v6_get_nonce(const __be32 *saddr, const __be32 *daddr,
++ __be16 sport, __be16 dport, u32 seq)
++{
++ u32 secret[MD5_MESSAGE_BYTES / 4];
++ u32 hash[MD5_DIGEST_WORDS];
++ u32 i;
++
++ memcpy(hash, saddr, 16);
++ for (i = 0; i < 4; i++)
++ secret[i] = mptcp_secret[i] + (__force u32)daddr[i];
++ secret[4] = mptcp_secret[4] +
++ (((__force u16)sport << 16) + (__force u16)dport);
++ secret[5] = seq;
++ for (i = 6; i < MD5_MESSAGE_BYTES / 4; i++)
++ secret[i] = mptcp_secret[i];
++
++ md5_transform(hash, secret);
++
++ return hash[0];
++}
++
++u64 mptcp_v6_get_key(const __be32 *saddr, const __be32 *daddr,
++ __be16 sport, __be16 dport)
++{
++ u32 secret[MD5_MESSAGE_BYTES / 4];
++ u32 hash[MD5_DIGEST_WORDS];
++ u32 i;
++
++ memcpy(hash, saddr, 16);
++ for (i = 0; i < 4; i++)
++ secret[i] = mptcp_secret[i] + (__force u32)daddr[i];
++ secret[4] = mptcp_secret[4] +
++ (((__force u16)sport << 16) + (__force u16)dport);
++ secret[5] = mptcp_key_seed++;
++ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
++ secret[i] = mptcp_secret[i];
++
++ md5_transform(hash, secret);
++
++ return *((u64 *)hash);
++}
++
++static void mptcp_v6_reqsk_destructor(struct request_sock *req)
++{
++ mptcp_reqsk_destructor(req);
++
++ tcp_v6_reqsk_destructor(req);
++}
++
++/* Similar to tcp_v6_rtx_synack */
++static int mptcp_v6_rtx_synack(struct sock *meta_sk, struct request_sock *req)
++{
++ if (meta_sk->sk_family == AF_INET6)
++ return tcp_v6_rtx_synack(meta_sk, req);
++
++ TCP_INC_STATS_BH(sock_net(meta_sk), TCP_MIB_RETRANSSEGS);
++ return mptcp_v6v4_send_synack(meta_sk, req, 0);
++}
++
++/* Similar to tcp6_request_sock_ops */
++struct request_sock_ops mptcp6_request_sock_ops __read_mostly = {
++ .family = AF_INET6,
++ .obj_size = sizeof(struct mptcp_request_sock),
++ .rtx_syn_ack = mptcp_v6_rtx_synack,
++ .send_ack = tcp_v6_reqsk_send_ack,
++ .destructor = mptcp_v6_reqsk_destructor,
++ .send_reset = tcp_v6_send_reset,
++ .syn_ack_timeout = tcp_syn_ack_timeout,
++};
++
++static void mptcp_v6_reqsk_queue_hash_add(struct sock *meta_sk,
++ struct request_sock *req,
++ unsigned long timeout)
++{
++ const u32 h1 = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
++ inet_rsk(req)->ir_rmt_port,
++ 0, MPTCP_HASH_SIZE);
++ /* We cannot call inet6_csk_reqsk_queue_hash_add(), because we do not
++ * want to reset the keepalive-timer (responsible for retransmitting
++ * SYN/ACKs). We do not retransmit SYN/ACKs+MP_JOINs, because we cannot
++ * overload the keepalive timer. Also, it's not a big deal, because the
++ * third ACK of the MP_JOIN-handshake is sent in a reliable manner. So,
++ * if the third ACK gets lost, the client will handle the retransmission
++ * anyways. If our SYN/ACK gets lost, the client will retransmit the
++ * SYN.
++ */
++ struct inet_connection_sock *meta_icsk = inet_csk(meta_sk);
++ struct listen_sock *lopt = meta_icsk->icsk_accept_queue.listen_opt;
++ const u32 h2 = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
++ inet_rsk(req)->ir_rmt_port,
++ lopt->hash_rnd, lopt->nr_table_entries);
++
++ reqsk_queue_hash_req(&meta_icsk->icsk_accept_queue, h2, req, timeout);
++ reqsk_queue_added(&meta_icsk->icsk_accept_queue);
++
++ spin_lock(&mptcp_reqsk_hlock);
++ list_add(&mptcp_rsk(req)->collide_tuple, &mptcp_reqsk_htb[h1]);
++ spin_unlock(&mptcp_reqsk_hlock);
++}
++
++/* Similar to tcp_v6_send_synack
++ *
++ * The meta-socket is IPv4, but a new subsocket is IPv6
++ */
++static int mptcp_v6v4_send_synack(struct sock *meta_sk, struct request_sock *req,
++ u16 queue_mapping)
++{
++ struct inet_request_sock *treq = inet_rsk(req);
++ struct sk_buff *skb;
++ struct flowi6 fl6;
++ struct dst_entry *dst;
++ int err = -ENOMEM;
++
++ memset(&fl6, 0, sizeof(fl6));
++ fl6.flowi6_proto = IPPROTO_TCP;
++ fl6.daddr = treq->ir_v6_rmt_addr;
++ fl6.saddr = treq->ir_v6_loc_addr;
++ fl6.flowlabel = 0;
++ fl6.flowi6_oif = treq->ir_iif;
++ fl6.flowi6_mark = meta_sk->sk_mark;
++ fl6.fl6_dport = inet_rsk(req)->ir_rmt_port;
++ fl6.fl6_sport = htons(inet_rsk(req)->ir_num);
++ security_req_classify_flow(req, flowi6_to_flowi(&fl6));
++
++ dst = ip6_dst_lookup_flow(meta_sk, &fl6, NULL);
++ if (IS_ERR(dst)) {
++ err = PTR_ERR(dst);
++ return err;
++ }
++ skb = tcp_make_synack(meta_sk, dst, req, NULL);
++
++ if (skb) {
++ __tcp_v6_send_check(skb, &treq->ir_v6_loc_addr,
++ &treq->ir_v6_rmt_addr);
++
++ fl6.daddr = treq->ir_v6_rmt_addr;
++ skb_set_queue_mapping(skb, queue_mapping);
++ err = ip6_xmit(meta_sk, skb, &fl6, NULL, 0);
++ err = net_xmit_eval(err);
++ }
++
++ return err;
++}
++
++/* Similar to tcp_v6_syn_recv_sock
++ *
++ * The meta-socket is IPv4, but a new subsocket is IPv6
++ */
++struct sock *mptcp_v6v4_syn_recv_sock(struct sock *meta_sk, struct sk_buff *skb,
++ struct request_sock *req,
++ struct dst_entry *dst)
++{
++ struct inet_request_sock *treq;
++ struct ipv6_pinfo *newnp;
++ struct tcp6_sock *newtcp6sk;
++ struct inet_sock *newinet;
++ struct tcp_sock *newtp;
++ struct sock *newsk;
++
++ treq = inet_rsk(req);
++
++ if (sk_acceptq_is_full(meta_sk))
++ goto out_overflow;
++
++ if (!dst) {
++ /* This code is similar to inet6_csk_route_req, but as we
++ * don't have a np-pointer in the meta, we have to do it
++ * manually.
++ */
++ struct flowi6 fl6;
++
++ memset(&fl6, 0, sizeof(fl6));
++ fl6.flowi6_proto = IPPROTO_TCP;
++ fl6.daddr = treq->ir_v6_rmt_addr;
++ fl6.saddr = treq->ir_v6_loc_addr;
++ fl6.flowi6_oif = treq->ir_iif;
++ fl6.flowi6_mark = meta_sk->sk_mark;
++ fl6.fl6_dport = inet_rsk(req)->ir_rmt_port;
++ fl6.fl6_sport = htons(inet_rsk(req)->ir_num);
++ security_req_classify_flow(req, flowi6_to_flowi(&fl6));
++
++ dst = ip6_dst_lookup_flow(meta_sk, &fl6, NULL);
++ if (IS_ERR(dst))
++ goto out;
++ }
++
++ newsk = tcp_create_openreq_child(meta_sk, req, skb);
++ if (newsk == NULL)
++ goto out_nonewsk;
++
++ /* Diff to tcp_v6_syn_recv_sock: Must do this prior to __ip6_dst_store,
++ * as it tries to access the pinet6-pointer.
++ */
++ newtcp6sk = (struct tcp6_sock *)newsk;
++ inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
++
++ /*
++ * No need to charge this sock to the relevant IPv6 refcnt debug socks
++ * count here, tcp_create_openreq_child now does this for us, see the
++ * comment in that function for the gory details. -acme
++ */
++
++ newsk->sk_gso_type = SKB_GSO_TCPV6;
++ __ip6_dst_store(newsk, dst, NULL, NULL);
++ inet6_sk_rx_dst_set(newsk, skb);
++
++ newtp = tcp_sk(newsk);
++ newinet = inet_sk(newsk);
++ newnp = inet6_sk(newsk);
++
++ newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
++ newnp->saddr = treq->ir_v6_loc_addr;
++ newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
++ newsk->sk_bound_dev_if = treq->ir_iif;
++
++ /* Now IPv6 options...
++
++ First: no IPv4 options.
++ */
++ newinet->inet_opt = NULL;
++ newnp->ipv6_ac_list = NULL;
++ newnp->ipv6_fl_list = NULL;
++ newnp->rxopt.all = 0;
++
++ /* Clone pktoptions received with SYN */
++ newnp->pktoptions = NULL;
++ if (treq->pktopts != NULL) {
++ newnp->pktoptions = skb_clone(treq->pktopts,
++ sk_gfp_atomic(meta_sk, GFP_ATOMIC));
++ consume_skb(treq->pktopts);
++ treq->pktopts = NULL;
++ if (newnp->pktoptions)
++ skb_set_owner_r(newnp->pktoptions, newsk);
++ }
++ newnp->opt = NULL;
++ newnp->mcast_oif = inet6_iif(skb);
++ newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
++ newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
++
++ /* Initialization copied from inet6_create - normally this should have
++ * been handled by the memcpy as in tcp_v6_syn_recv_sock
++ */
++ newnp->hop_limit = -1;
++ newnp->mc_loop = 1;
++ newnp->pmtudisc = IPV6_PMTUDISC_WANT;
++ (void)xchg(&newnp->rxpmtu, NULL);
++
++ inet_csk(newsk)->icsk_ext_hdr_len = 0;
++
++ tcp_mtup_init(newsk);
++ tcp_sync_mss(newsk, dst_mtu(dst));
++ newtp->advmss = dst_metric_advmss(dst);
++ if (tcp_sk(meta_sk)->rx_opt.user_mss &&
++ tcp_sk(meta_sk)->rx_opt.user_mss < newtp->advmss)
++ newtp->advmss = tcp_sk(meta_sk)->rx_opt.user_mss;
++
++ tcp_initialize_rcv_mss(newsk);
++
++ newinet->inet_daddr = LOOPBACK4_IPV6;
++ newinet->inet_saddr = LOOPBACK4_IPV6;
++ newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
++
++ if (__inet_inherit_port(meta_sk, newsk) < 0) {
++ inet_csk_prepare_forced_close(newsk);
++ tcp_done(newsk);
++ goto out;
++ }
++ __inet6_hash(newsk, NULL);
++
++ return newsk;
++
++out_overflow:
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_LISTENOVERFLOWS);
++out_nonewsk:
++ dst_release(dst);
++out:
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_LISTENDROPS);
++ return NULL;
++}
++
++/* Similar to tcp_v6_conn_request */
++static void mptcp_v6_join_request(struct sock *meta_sk, struct sk_buff *skb)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct tcp_options_received tmp_opt;
++ struct mptcp_options_received mopt;
++ struct ipv6_pinfo *np = inet6_sk(meta_sk);
++ struct request_sock *req;
++ struct inet_request_sock *treq;
++ struct mptcp_request_sock *mtreq;
++ u8 mptcp_hash_mac[20];
++ __u32 isn = TCP_SKB_CB(skb)->when;
++ struct dst_entry *dst = NULL;
++ struct flowi6 fl6;
++ int want_cookie = 0;
++ union inet_addr addr;
++
++ tcp_clear_options(&tmp_opt);
++ mptcp_init_mp_opt(&mopt);
++ tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
++ tmp_opt.user_mss = tcp_sk(meta_sk)->rx_opt.user_mss;
++ tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL);
++
++ req = inet6_reqsk_alloc(&mptcp6_request_sock_ops);
++ if (!req)
++ return;
++
++#ifdef CONFIG_TCP_MD5SIG
++ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
++#endif
++
++ tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
++ tcp_openreq_init(req, &tmp_opt, skb);
++
++ treq = inet_rsk(req);
++ treq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
++ treq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
++
++ if (!want_cookie || tmp_opt.tstamp_ok)
++ TCP_ECN_create_request(req, skb, sock_net(meta_sk));
++
++ treq->ir_iif = meta_sk->sk_bound_dev_if;
++
++ /* So that link locals have meaning */
++ if (!meta_sk->sk_bound_dev_if &&
++ ipv6_addr_type(&treq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
++ treq->ir_iif = inet6_iif(skb);
++
++ if (!isn) {
++ if (meta_sk->sk_family == AF_INET6 &&
++ (ipv6_opt_accepted(meta_sk, skb) ||
++ np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
++ np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)) {
++ atomic_inc(&skb->users);
++ treq->pktopts = skb;
++ }
++
++ /* VJ's idea. We save last timestamp seen
++ * from the destination in peer table, when entering
++ * state TIME-WAIT, and check against it before
++ * accepting new connection request.
++ *
++ * If "isn" is not zero, this request hit alive
++ * timewait bucket, so that all the necessary checks
++ * are made in the function processing timewait state.
++ */
++ if (tmp_opt.saw_tstamp &&
++ tcp_death_row.sysctl_tw_recycle &&
++ (dst = inet6_csk_route_req(meta_sk, &fl6, req)) != NULL) {
++ if (!tcp_peer_is_proven(req, dst, true)) {
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_PAWSPASSIVEREJECTED);
++ goto drop_and_release;
++ }
++ }
++ /* Kill the following clause, if you dislike this way. */
++ else if (!sysctl_tcp_syncookies &&
++ (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(meta_sk) <
++ (sysctl_max_syn_backlog >> 2)) &&
++ !tcp_peer_is_proven(req, dst, false)) {
++ /* Without syncookies last quarter of
++ * backlog is filled with destinations,
++ * proven to be alive.
++ * It means that we continue to communicate
++ * to destinations, already remembered
++ * to the moment of synflood.
++ */
++ LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
++ &treq->ir_v6_rmt_addr,
++ ntohs(tcp_hdr(skb)->source));
++ goto drop_and_release;
++ }
++
++ isn = tcp_v6_init_sequence(skb);
++ }
++
++ tcp_rsk(req)->snt_isn = isn;
++ tcp_rsk(req)->snt_synack = tcp_time_stamp;
++ tcp_rsk(req)->listener = NULL;
++
++ mtreq = mptcp_rsk(req);
++ mtreq->mpcb = mpcb;
++ INIT_LIST_HEAD(&mtreq->collide_tuple);
++ mtreq->mptcp_rem_nonce = mopt.mptcp_recv_nonce;
++ mtreq->mptcp_rem_key = mpcb->mptcp_rem_key;
++ mtreq->mptcp_loc_key = mpcb->mptcp_loc_key;
++ mtreq->mptcp_loc_nonce = mptcp_v6_get_nonce(ipv6_hdr(skb)->daddr.s6_addr32,
++ ipv6_hdr(skb)->saddr.s6_addr32,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source, isn);
++ mptcp_hmac_sha1((u8 *)&mtreq->mptcp_loc_key,
++ (u8 *)&mtreq->mptcp_rem_key,
++ (u8 *)&mtreq->mptcp_loc_nonce,
++ (u8 *)&mtreq->mptcp_rem_nonce, (u32 *)mptcp_hash_mac);
++ mtreq->mptcp_hash_tmac = *(u64 *)mptcp_hash_mac;
++
++ addr.in6 = treq->ir_v6_loc_addr;
++ mtreq->loc_id = mpcb->pm_ops->get_local_id(AF_INET6, &addr, sock_net(meta_sk));
++ if (mtreq->loc_id == -1) /* Address not part of the allowed ones */
++ goto drop_and_release;
++ mtreq->rem_id = mopt.rem_id;
++ mtreq->low_prio = mopt.low_prio;
++ tcp_rsk(req)->saw_mpc = 1;
++
++ if (meta_sk->sk_family == AF_INET6) {
++ if (tcp_v6_send_synack(meta_sk, dst, &fl6, req,
++ skb_get_queue_mapping(skb)))
++ goto drop_and_free;
++ } else {
++ if (mptcp_v6v4_send_synack(meta_sk, req, skb_get_queue_mapping(skb)))
++ goto drop_and_free;
++ }
++
++ /* Adding to request queue in metasocket */
++ mptcp_v6_reqsk_queue_hash_add(meta_sk, req, TCP_TIMEOUT_INIT);
++
++ return;
++
++drop_and_release:
++ dst_release(dst);
++drop_and_free:
++ reqsk_free(req);
++ return;
++}
++
++int mptcp_v6_rem_raddress(struct mptcp_cb *mpcb, u8 id)
++{
++ int i;
++
++ for (i = 0; i < MPTCP_MAX_ADDR; i++) {
++ if (!((1 << i) & mpcb->rem6_bits))
++ continue;
++
++ if (mpcb->remaddr6[i].rem6_id == id) {
++ /* remove address from bitfield */
++ mpcb->rem6_bits &= ~(1 << i);
++
++ return 0;
++ }
++ }
++
++ return -1;
++}
++
++/* Returns -1 if there is no space left to store an additional
++ * address.
++ */
++int mptcp_v6_add_raddress(struct mptcp_cb *mpcb, const struct in6_addr *addr,
++ __be16 port, u8 id)
++{
++ int i;
++ struct mptcp_rem6 *rem6;
++
++ mptcp_for_each_bit_set(mpcb->rem6_bits, i) {
++ rem6 = &mpcb->remaddr6[i];
++
++ /* Address is already in the list --- continue */
++ if (rem6->rem6_id == id &&
++ ipv6_addr_equal(&rem6->addr, addr) && rem6->port == port)
++ return 0;
++
++		/* This may be the case when the peer is behind a NAT. It is
++		 * trying to JOIN, thus sending the JOIN with a certain ID.
++		 * However, the src_addr of the IP-packet has been changed. We
++		 * update the addr in the list, because this is the address as
++		 * our box sees it.
++ */
++ if (rem6->rem6_id == id) {
++ /* update the address */
++ mptcp_debug("%s: updating old addr: %pI6 to addr %pI6 with id:%d\n",
++ __func__, &rem6->addr, addr, id);
++ rem6->addr = *addr;
++ rem6->port = port;
++ mpcb->list_rcvd = 1;
++ return 0;
++ }
++ }
++
++ i = mptcp_find_free_index(mpcb->rem6_bits);
++	/* Do we already have the maximum number of local/remote addresses? */
++ if (i < 0) {
++ mptcp_debug("%s: At max num of remote addresses: %d --- not adding address: %pI6\n",
++ __func__, MPTCP_MAX_ADDR, addr);
++ return -1;
++ }
++
++ rem6 = &mpcb->remaddr6[i];
++
++ /* Address is not known yet, store it */
++ rem6->addr = *addr;
++ rem6->port = port;
++ rem6->bitfield = 0;
++ rem6->retry_bitfield = 0;
++ rem6->rem6_id = id;
++ mpcb->list_rcvd = 1;
++ mpcb->rem6_bits |= (1 << i);
++
++ return 0;
++}
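
The remote-address table is a small fixed-size array whose occupied slots are tracked by the rem6_bits bitfield: a set bit means the corresponding remaddr6[] entry is in use, and mptcp_find_free_index() (not shown in this hunk) yields a free slot index, or a negative value when the table is full. A stand-alone sketch of that slot allocator, assuming eight slots purely for illustration; the names are not taken from the patch.

#include <stdio.h>

#define MAX_ADDR 8	/* illustrative slot count, not a value from the patch */

/* Return the first free slot index, or -1 if the bitfield is full. */
static int find_free_index(unsigned char bits)
{
	for (int i = 0; i < MAX_ADDR; i++)
		if (!(bits & (1u << i)))
			return i;
	return -1;
}

int main(void)
{
	unsigned char rem_bits = 0;

	/* occupy three slots, then free the middle one */
	rem_bits |= 1u << 0;
	rem_bits |= 1u << 1;
	rem_bits |= 1u << 2;
	rem_bits &= ~(1u << 1);

	printf("next free slot: %d\n", find_free_index(rem_bits));	/* prints 1 */
	return 0;
}
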
++
++/* Sets the bitfield of the remote-address field.
++ * The local address is not set, as it will disappear with the global address-list.
++ */
++void mptcp_v6_set_init_addr_bit(struct mptcp_cb *mpcb,
++ const struct in6_addr *daddr, int index)
++{
++ int i;
++ mptcp_for_each_bit_set(mpcb->rem6_bits, i) {
++ if (ipv6_addr_equal(&mpcb->remaddr6[i].addr, daddr)) {
++ mpcb->remaddr6[i].bitfield |= (1 << index);
++ return;
++ }
++ }
++}
++
++int mptcp_v6_do_rcv(struct sock *meta_sk, struct sk_buff *skb)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct sock *child, *rsk = NULL;
++ int ret;
++
++ if (!(TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_JOIN)) {
++ struct tcphdr *th = tcp_hdr(skb);
++ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
++ struct sock *sk;
++
++ sk = __inet6_lookup_established(sock_net(meta_sk),
++ &tcp_hashinfo,
++ &ip6h->saddr, th->source,
++ &ip6h->daddr, ntohs(th->dest),
++ inet6_iif(skb));
++
++ if (!sk) {
++ kfree_skb(skb);
++ return 0;
++ }
++ if (is_meta_sk(sk)) {
++			WARN(1, "%s Did not find a sub-sk!\n", __func__);
++ kfree_skb(skb);
++ sock_put(sk);
++ return 0;
++ }
++
++ if (sk->sk_state == TCP_TIME_WAIT) {
++ inet_twsk_put(inet_twsk(sk));
++ kfree_skb(skb);
++ return 0;
++ }
++
++ ret = tcp_v6_do_rcv(sk, skb);
++ sock_put(sk);
++
++ return ret;
++ }
++ TCP_SKB_CB(skb)->mptcp_flags = 0;
++
++ /* Has been removed from the tk-table. Thus, no new subflows.
++ *
++ * Check for close-state is necessary, because we may have been closed
++ * without passing by mptcp_close().
++ *
++ * When falling back, no new subflows are allowed either.
++ */
++ if (meta_sk->sk_state == TCP_CLOSE || !tcp_sk(meta_sk)->inside_tk_table ||
++ mpcb->infinite_mapping_rcv || mpcb->send_infinite_mapping)
++ goto reset_and_discard;
++
++ child = tcp_v6_hnd_req(meta_sk, skb);
++
++ if (!child)
++ goto discard;
++
++ if (child != meta_sk) {
++ sock_rps_save_rxhash(child, skb);
++		/* We don't call tcp_child_process here, because we already
++		 * hold the meta-sk-lock and are sure that it is not owned
++		 * by the user.
++ */
++ ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb), skb->len);
++ bh_unlock_sock(child);
++ sock_put(child);
++ if (ret) {
++ rsk = child;
++ goto reset_and_discard;
++ }
++ } else {
++ if (tcp_hdr(skb)->syn) {
++ struct mp_join *join_opt = mptcp_find_join(skb);
++ /* Currently we make two calls to mptcp_find_join(). This
++ * can probably be optimized. */
++ if (mptcp_v6_add_raddress(mpcb,
++ (struct in6_addr *)&ipv6_hdr(skb)->saddr,
++ 0,
++ join_opt->addr_id) < 0)
++ goto reset_and_discard;
++ mpcb->list_rcvd = 0;
++
++ mptcp_v6_join_request(meta_sk, skb);
++ goto discard;
++ }
++ goto reset_and_discard;
++ }
++ return 0;
++
++reset_and_discard:
++ tcp_v6_send_reset(rsk, skb);
++discard:
++ kfree_skb(skb);
++ return 0;
++}
++
++/* After this, the ref count of the meta_sk associated with the request_sock
++ * is incremented. Thus it is the responsibility of the caller
++ * to call sock_put() when the reference is not needed anymore.
++ */
++struct sock *mptcp_v6_search_req(const __be16 rport, const struct in6_addr *raddr,
++ const struct in6_addr *laddr, const struct net *net)
++{
++ struct mptcp_request_sock *mtreq;
++ struct sock *meta_sk = NULL;
++
++ spin_lock(&mptcp_reqsk_hlock);
++ list_for_each_entry(mtreq,
++ &mptcp_reqsk_htb[inet6_synq_hash(raddr, rport, 0,
++ MPTCP_HASH_SIZE)],
++ collide_tuple) {
++ struct inet_request_sock *treq = inet_rsk(rev_mptcp_rsk(mtreq));
++ meta_sk = mtreq->mpcb->meta_sk;
++
++ if (inet_rsk(rev_mptcp_rsk(mtreq))->ir_rmt_port == rport &&
++ rev_mptcp_rsk(mtreq)->rsk_ops->family == AF_INET6 &&
++ ipv6_addr_equal(&treq->ir_v6_rmt_addr, raddr) &&
++ ipv6_addr_equal(&treq->ir_v6_loc_addr, laddr) &&
++ net_eq(net, sock_net(meta_sk)))
++ break;
++ meta_sk = NULL;
++ }
++
++ if (meta_sk && unlikely(!atomic_inc_not_zero(&meta_sk->sk_refcnt)))
++ meta_sk = NULL;
++ spin_unlock(&mptcp_reqsk_hlock);
++
++ return meta_sk;
++}
++
++/* Create a new IPv6 subflow.
++ *
++ * We are in user context and the meta-sock-lock is held.
++ */
++int mptcp_init6_subsockets(struct sock *meta_sk, const struct mptcp_loc6 *loc,
++ struct mptcp_rem6 *rem)
++{
++ struct tcp_sock *tp;
++ struct sock *sk;
++ struct sockaddr_in6 loc_in, rem_in;
++ struct socket sock;
++ int ulid_size = 0, ret;
++
++ /** First, create and prepare the new socket */
++
++ sock.type = meta_sk->sk_socket->type;
++ sock.state = SS_UNCONNECTED;
++ sock.wq = meta_sk->sk_socket->wq;
++ sock.file = meta_sk->sk_socket->file;
++ sock.ops = NULL;
++
++ ret = inet6_create(sock_net(meta_sk), &sock, IPPROTO_TCP, 1);
++ if (unlikely(ret < 0)) {
++ mptcp_debug("%s inet6_create failed ret: %d\n", __func__, ret);
++ return ret;
++ }
++
++ sk = sock.sk;
++ tp = tcp_sk(sk);
++
++ /* All subsockets need the MPTCP-lock-class */
++ lockdep_set_class_and_name(&(sk)->sk_lock.slock, &meta_slock_key, "slock-AF_INET-MPTCP");
++ lockdep_init_map(&(sk)->sk_lock.dep_map, "sk_lock-AF_INET-MPTCP", &meta_key, 0);
++
++ if (mptcp_add_sock(meta_sk, sk, loc->loc6_id, rem->rem6_id, GFP_KERNEL))
++ goto error;
++
++ tp->mptcp->slave_sk = 1;
++ tp->mptcp->low_prio = loc->low_prio;
++
++ /* Initializing the timer for an MPTCP subflow */
++ setup_timer(&tp->mptcp->mptcp_ack_timer, mptcp_ack_handler, (unsigned long)sk);
++
++ /** Then, connect the socket to the peer */
++
++ ulid_size = sizeof(struct sockaddr_in6);
++ loc_in.sin6_family = AF_INET6;
++ rem_in.sin6_family = AF_INET6;
++ loc_in.sin6_port = 0;
++ if (rem->port)
++ rem_in.sin6_port = rem->port;
++ else
++ rem_in.sin6_port = inet_sk(meta_sk)->inet_dport;
++ loc_in.sin6_addr = loc->addr;
++ rem_in.sin6_addr = rem->addr;
++
++ ret = sock.ops->bind(&sock, (struct sockaddr *)&loc_in, ulid_size);
++ if (ret < 0) {
++		mptcp_debug("%s: MPTCP subsocket bind() failed, error %d\n",
++ __func__, ret);
++ goto error;
++ }
++
++ mptcp_debug("%s: token %#x pi %d src_addr:%pI6:%d dst_addr:%pI6:%d\n",
++ __func__, tcp_sk(meta_sk)->mpcb->mptcp_loc_token,
++ tp->mptcp->path_index, &loc_in.sin6_addr,
++ ntohs(loc_in.sin6_port), &rem_in.sin6_addr,
++ ntohs(rem_in.sin6_port));
++
++ ret = sock.ops->connect(&sock, (struct sockaddr *)&rem_in,
++ ulid_size, O_NONBLOCK);
++ if (ret < 0 && ret != -EINPROGRESS) {
++ mptcp_debug("%s: MPTCP subsocket connect() failed, error %d\n",
++ __func__, ret);
++ goto error;
++ }
++
++ sk_set_socket(sk, meta_sk->sk_socket);
++ sk->sk_wq = meta_sk->sk_wq;
++
++ return 0;
++
++error:
++ /* May happen if mptcp_add_sock fails first */
++ if (!tp->mpc) {
++ tcp_close(sk, 0);
++ } else {
++ local_bh_disable();
++ mptcp_sub_force_close(sk);
++ local_bh_enable();
++ }
++ return ret;
++}
++EXPORT_SYMBOL(mptcp_init6_subsockets);
++
++int mptcp_pm_v6_init(void)
++{
++ int ret = 0;
++ struct request_sock_ops *ops = &mptcp6_request_sock_ops;
++
++ ops->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", "MPTCP6");
++ if (ops->slab_name == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ops->slab = kmem_cache_create(ops->slab_name, ops->obj_size, 0,
++ SLAB_DESTROY_BY_RCU|SLAB_HWCACHE_ALIGN,
++ NULL);
++
++ if (ops->slab == NULL) {
++ ret = -ENOMEM;
++ goto err_reqsk_create;
++ }
++
++out:
++ return ret;
++
++err_reqsk_create:
++ kfree(ops->slab_name);
++ ops->slab_name = NULL;
++ goto out;
++}
++
++void mptcp_pm_v6_undo(void)
++{
++ kmem_cache_destroy(mptcp6_request_sock_ops.slab);
++ kfree(mptcp6_request_sock_ops.slab_name);
++}
+diff --git a/net/mptcp/mptcp_ndiffports.c b/net/mptcp/mptcp_ndiffports.c
+new file mode 100644
+index 0000000..a126325
+--- /dev/null
++++ b/net/mptcp/mptcp_ndiffports.c
+@@ -0,0 +1,171 @@
++#include <linux/module.h>
++
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++
++#if IS_ENABLED(CONFIG_IPV6)
++#include <net/mptcp_v6.h>
++#endif
++
++struct ndiffports_priv {
++ /* Worker struct for subflow establishment */
++ struct work_struct subflow_work;
++
++ struct mptcp_cb *mpcb;
++};
++
++static int sysctl_mptcp_ndiffports __read_mostly = 2;
++
++/**
++ * Create all new subflows, by calling mptcp_initX_subsockets()
++ *
++ * This function uses a goto next_subflow, to allow releasing the lock between
++ * new subflows, giving other processes a chance to do some work on the
++ * socket and potentially finish the communication.
++ */
++static void create_subflow_worker(struct work_struct *work)
++{
++ struct ndiffports_priv *pm_priv = container_of(work,
++ struct ndiffports_priv,
++ subflow_work);
++ struct mptcp_cb *mpcb = pm_priv->mpcb;
++ struct sock *meta_sk = mpcb->meta_sk;
++ int iter = 0;
++
++next_subflow:
++ if (iter) {
++ release_sock(meta_sk);
++ mutex_unlock(&mpcb->mpcb_mutex);
++
++ yield();
++ }
++ mutex_lock(&mpcb->mpcb_mutex);
++ lock_sock_nested(meta_sk, SINGLE_DEPTH_NESTING);
++
++ iter++;
++
++ if (sock_flag(meta_sk, SOCK_DEAD))
++ goto exit;
++
++ if (mpcb->master_sk &&
++ !tcp_sk(mpcb->master_sk)->mptcp->fully_established)
++ goto exit;
++
++ if (sysctl_mptcp_ndiffports > iter &&
++ sysctl_mptcp_ndiffports > mpcb->cnt_subflows) {
++ if (meta_sk->sk_family == AF_INET ||
++ mptcp_v6_is_v4_mapped(meta_sk)) {
++ struct mptcp_loc4 loc;
++
++ loc.addr.s_addr = inet_sk(meta_sk)->inet_saddr;
++ loc.loc4_id = 0;
++ loc.low_prio = 0;
++
++ mptcp_init4_subsockets(meta_sk, &loc, &mpcb->remaddr4[0]);
++ } else {
++#if IS_ENABLED(CONFIG_IPV6)
++ struct mptcp_loc6 loc;
++
++ loc.addr = inet6_sk(meta_sk)->saddr;
++ loc.loc6_id = 0;
++ loc.low_prio = 0;
++
++ mptcp_init6_subsockets(meta_sk, &loc, &mpcb->remaddr6[0]);
++#endif
++ }
++ goto next_subflow;
++ }
++
++exit:
++ release_sock(meta_sk);
++ mutex_unlock(&mpcb->mpcb_mutex);
++ sock_put(meta_sk);
++}
++
++static void ndiffports_new_session(struct sock *meta_sk, int index)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct ndiffports_priv *fmp = (struct ndiffports_priv *)&mpcb->mptcp_pm[0];
++
++ /* Initialize workqueue-struct */
++ INIT_WORK(&fmp->subflow_work, create_subflow_worker);
++ fmp->mpcb = mpcb;
++}
++
++static void ndiffports_create_subflows(struct sock *meta_sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct ndiffports_priv *pm_priv = (struct ndiffports_priv *)&mpcb->mptcp_pm[0];
++
++ if (mpcb->infinite_mapping_snd || mpcb->infinite_mapping_rcv ||
++ mpcb->send_infinite_mapping ||
++ mpcb->server_side || sock_flag(meta_sk, SOCK_DEAD))
++ return;
++
++ if (!work_pending(&pm_priv->subflow_work)) {
++ sock_hold(meta_sk);
++ queue_work(mptcp_wq, &pm_priv->subflow_work);
++ }
++}
++
++static int ndiffports_get_local_index(sa_family_t family, union inet_addr *addr,
++ struct net *net)
++{
++ return 0;
++}
++
++static struct mptcp_pm_ops ndiffports __read_mostly = {
++ .new_session = ndiffports_new_session,
++ .fully_established = ndiffports_create_subflows,
++ .get_local_index = ndiffports_get_local_index,
++ .get_local_id = ndiffports_get_local_index,
++ .name = "ndiffports",
++ .owner = THIS_MODULE,
++};
++
++static struct ctl_table ndiff_table[] = {
++ {
++ .procname = "mptcp_ndiffports",
++ .data = &sysctl_mptcp_ndiffports,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ { }
++};
++
++struct ctl_table_header *mptcp_sysctl;
++
++/* General initialization of MPTCP_PM */
++static int __init ndiffports_register(void)
++{
++ BUILD_BUG_ON(sizeof(struct ndiffports_priv) > MPTCP_PM_SIZE);
++
++ mptcp_sysctl = register_net_sysctl(&init_net, "net/mptcp", ndiff_table);
++ if (!mptcp_sysctl)
++ goto exit;
++
++ if (mptcp_register_path_manager(&ndiffports))
++ goto pm_failed;
++
++ return 0;
++
++pm_failed:
++ unregister_net_sysctl_table(mptcp_sysctl);
++exit:
++ return -1;
++}
++
++static void ndiffports_unregister(void)
++{
++ mptcp_unregister_path_manager(&ndiffports);
++ unregister_net_sysctl_table(mptcp_sysctl);
++}
++
++module_init(ndiffports_register);
++module_exit(ndiffports_unregister);
++
++MODULE_AUTHOR("Christoph Paasch");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("NDIFF-PORTS MPTCP");
++MODULE_VERSION("0.88");
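
The ndiffports path manager simply brings the connection up to sysctl_mptcp_ndiffports subflows over the one address pair the connection already uses, so the subflows differ only in their source port. The limit is registered under net/mptcp above, so on a kernel carrying this patch it should be reachable as net.mptcp.mptcp_ndiffports, i.e. /proc/sys/net/mptcp/mptcp_ndiffports. A small sketch of how a userspace tool might read it; the procfs path is assumed from the register_net_sysctl() call above.

#include <stdio.h>

/* Read the configured number of subflows; returns -1 if unavailable. */
static int read_ndiffports(void)
{
	int val = -1;
	FILE *f = fopen("/proc/sys/net/mptcp/mptcp_ndiffports", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("mptcp_ndiffports = %d\n", read_ndiffports());
	return 0;
}
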
+diff --git a/net/mptcp/mptcp_ofo_queue.c b/net/mptcp/mptcp_ofo_queue.c
+new file mode 100644
+index 0000000..e182855
+--- /dev/null
++++ b/net/mptcp/mptcp_ofo_queue.c
+@@ -0,0 +1,278 @@
++/*
++ * MPTCP implementation - Fast algorithm for MPTCP meta-reordering
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/skbuff.h>
++#include <linux/slab.h>
++#include <net/tcp.h>
++#include <net/mptcp.h>
++
++void mptcp_remove_shortcuts(const struct mptcp_cb *mpcb,
++ const struct sk_buff *skb)
++{
++ struct tcp_sock *tp;
++
++ mptcp_for_each_tp(mpcb, tp) {
++ if (tp->mptcp->shortcut_ofoqueue == skb) {
++ tp->mptcp->shortcut_ofoqueue = NULL;
++ return;
++ }
++ }
++}
++
++/* Does 'skb' fit after 'here' in the queue 'head'?
++ * If yes, we queue it and return 1.
++ */
++static int mptcp_ofo_queue_after(struct sk_buff_head *head,
++ struct sk_buff *skb, struct sk_buff *here,
++ struct tcp_sock *tp)
++{
++ struct sock *meta_sk = tp->meta_sk;
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ u32 seq = TCP_SKB_CB(skb)->seq;
++ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
++
++	/* We want to queue skb after here, thus we need seq >= here's end_seq */
++ if (before(seq, TCP_SKB_CB(here)->end_seq))
++ return 0;
++
++ if (seq == TCP_SKB_CB(here)->end_seq) {
++ bool fragstolen = false;
++
++ if (!tcp_try_coalesce(meta_sk, here, skb, &fragstolen)) {
++ __skb_queue_after(&meta_tp->out_of_order_queue, here, skb);
++ return 1;
++ } else {
++ kfree_skb_partial(skb, fragstolen);
++ return -1;
++ }
++ }
++
++ /* If here is the last one, we can always queue it */
++ if (skb_queue_is_last(head, here)) {
++ __skb_queue_after(head, here, skb);
++ return 1;
++ } else {
++ struct sk_buff *skb1 = skb_queue_next(head, here);
++		/* It's not the last one, but does it fit between 'here' and
++		 * the one after 'here'? That is, does end_seq <= after_here->seq?
++		 */
++ if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) {
++ __skb_queue_after(head, here, skb);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static void try_shortcut(struct sk_buff *shortcut, struct sk_buff *skb,
++ struct sk_buff_head *head, struct tcp_sock *tp)
++{
++ struct sock *meta_sk = tp->meta_sk;
++ struct tcp_sock *tp_it, *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sk_buff *skb1, *best_shortcut = NULL;
++ u32 seq = TCP_SKB_CB(skb)->seq;
++ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
++ u32 distance = 0xffffffff;
++
++ /* First, check the tp's shortcut */
++ if (!shortcut) {
++ if (skb_queue_empty(head)) {
++ __skb_queue_head(head, skb);
++ goto end;
++ }
++ } else {
++ int ret = mptcp_ofo_queue_after(head, skb, shortcut, tp);
++		/* Is the tp's shortcut a hit? If yes, we insert. */
++
++ if (ret) {
++ skb = (ret > 0) ? skb : NULL;
++ goto end;
++ }
++ }
++
++ /* Check the shortcuts of the other subsockets. */
++ mptcp_for_each_tp(mpcb, tp_it) {
++ shortcut = tp_it->mptcp->shortcut_ofoqueue;
++ /* Can we queue it here? If yes, do so! */
++ if (shortcut) {
++ int ret = mptcp_ofo_queue_after(head, skb, shortcut, tp);
++
++ if (ret) {
++ skb = (ret > 0) ? skb : NULL;
++ goto end;
++ }
++ }
++
++ /* Could not queue it, check if we are close.
++ * We are looking for a shortcut, close enough to seq to
++ * set skb1 prematurely and thus improve the subsequent lookup,
++ * which tries to find a skb1 so that skb1->seq <= seq.
++ *
++ * So, here we only take shortcuts, whose shortcut->seq > seq,
++ * and minimize the distance between shortcut->seq and seq and
++ * set best_shortcut to this one with the minimal distance.
++ *
++ * That way, the subsequent while-loop is shortest.
++ */
++ if (shortcut && after(TCP_SKB_CB(shortcut)->seq, seq)) {
++ /* Are we closer than the current best shortcut? */
++ if ((u32)(TCP_SKB_CB(shortcut)->seq - seq) < distance) {
++ distance = (u32)(TCP_SKB_CB(shortcut)->seq - seq);
++ best_shortcut = shortcut;
++ }
++ }
++ }
++
++ if (best_shortcut)
++ skb1 = best_shortcut;
++ else
++ skb1 = skb_peek_tail(head);
++
++ if (seq == TCP_SKB_CB(skb1)->end_seq) {
++ bool fragstolen = false;
++
++ if (!tcp_try_coalesce(meta_sk, skb1, skb, &fragstolen)) {
++ __skb_queue_after(&meta_tp->out_of_order_queue, skb1, skb);
++ } else {
++ kfree_skb_partial(skb, fragstolen);
++ skb = NULL;
++ }
++
++ goto end;
++ }
++
++ /* Find the insertion point, starting from best_shortcut if available.
++ *
++ * Inspired from tcp_data_queue_ofo.
++ */
++ while (1) {
++ /* skb1->seq <= seq */
++ if (!after(TCP_SKB_CB(skb1)->seq, seq))
++ break;
++ if (skb_queue_is_first(head, skb1)) {
++ skb1 = NULL;
++ break;
++ }
++ skb1 = skb_queue_prev(head, skb1);
++ }
++
++	/* Does skb overlap the previous one? */
++ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
++ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
++ /* All the bits are present. */
++ __kfree_skb(skb);
++ skb = NULL;
++ goto end;
++ }
++ if (seq == TCP_SKB_CB(skb1)->seq) {
++ if (skb_queue_is_first(head, skb1))
++ skb1 = NULL;
++ else
++ skb1 = skb_queue_prev(head, skb1);
++ }
++ }
++ if (!skb1)
++ __skb_queue_head(head, skb);
++ else
++ __skb_queue_after(head, skb1, skb);
++
++	/* And clean segments covered by the new one as a whole. */
++ while (!skb_queue_is_last(head, skb)) {
++ skb1 = skb_queue_next(head, skb);
++
++ if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
++ break;
++
++ __skb_unlink(skb1, head);
++ mptcp_remove_shortcuts(mpcb, skb1);
++ __kfree_skb(skb1);
++ }
++
++end:
++ if (skb) {
++ skb_set_owner_r(skb, meta_sk);
++ tp->mptcp->shortcut_ofoqueue = skb;
++ }
++
++ return;
++}
++
++/**
++ * @sk: the subflow that received this skb.
++ */
++void mptcp_add_meta_ofo_queue(struct sock *meta_sk, struct sk_buff *skb,
++ struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ try_shortcut(tp->mptcp->shortcut_ofoqueue, skb,
++ &tcp_sk(meta_sk)->out_of_order_queue, tp);
++}
++
++void mptcp_ofo_queue(struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sk_buff *skb;
++
++ while ((skb = skb_peek(&meta_tp->out_of_order_queue)) != NULL) {
++ u32 old_rcv_nxt = meta_tp->rcv_nxt;
++ if (after(TCP_SKB_CB(skb)->seq, meta_tp->rcv_nxt))
++ break;
++
++ if (!after(TCP_SKB_CB(skb)->end_seq, meta_tp->rcv_nxt)) {
++ __skb_unlink(skb, &meta_tp->out_of_order_queue);
++ mptcp_remove_shortcuts(meta_tp->mpcb, skb);
++ __kfree_skb(skb);
++ continue;
++ }
++
++ __skb_unlink(skb, &meta_tp->out_of_order_queue);
++ mptcp_remove_shortcuts(meta_tp->mpcb, skb);
++
++ __skb_queue_tail(&meta_sk->sk_receive_queue, skb);
++ meta_tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
++ mptcp_check_rcvseq_wrap(meta_tp, old_rcv_nxt);
++
++ if (tcp_hdr(skb)->fin)
++ mptcp_fin(meta_sk);
++ }
++}
++
++void mptcp_purge_ofo_queue(struct tcp_sock *meta_tp)
++{
++ struct sk_buff_head *head = &meta_tp->out_of_order_queue;
++ struct sk_buff *skb, *tmp;
++
++ skb_queue_walk_safe(head, skb, tmp) {
++ __skb_unlink(skb, head);
++ mptcp_remove_shortcuts(meta_tp->mpcb, skb);
++ kfree_skb(skb);
++ }
++}
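
The meta-level out-of-order queue is kept sorted by data sequence number: insertion walks backwards to the first segment whose seq is not after the new one, removes following segments that the new one makes redundant, and mptcp_ofo_queue() later drains whatever has become contiguous with rcv_nxt, relying on the wrap-safe before()/after() comparisons throughout. The sketch below is a much reduced userspace version of the ordered insert; it works on a sorted array instead of an sk_buff list and only drops followers that are completely covered, so it illustrates the idea rather than reproducing the kernel logic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Wrap-safe sequence comparison, in the spirit of the kernel's before(). */
static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

struct seg { uint32_t seq, end_seq; };

/* Insert s into the seq-sorted array q holding n segments; followers that
 * s covers completely are dropped. Returns the new segment count.
 */
static int ofo_insert(struct seg *q, int n, struct seg s)
{
	int i = n, j;

	while (i > 0 && seq_before(s.seq, q[i - 1].seq))
		i--;			/* insertion point: q[i-1].seq <= s.seq */

	j = i;
	while (j < n && !seq_before(s.end_seq, q[j].end_seq))
		j++;			/* q[i..j-1] are fully covered by s */

	memmove(&q[i + 1], &q[j], (n - j) * sizeof(*q));
	q[i] = s;
	return n - (j - i) + 1;
}

int main(void)
{
	struct seg q[8];
	int n = 0;

	n = ofo_insert(q, n, (struct seg){ 2000, 3000 });
	n = ofo_insert(q, n, (struct seg){ 1000, 2000 });
	n = ofo_insert(q, n, (struct seg){ 1500, 3500 });  /* covers [2000,3000) */

	for (int i = 0; i < n; i++)
		printf("[%u, %u)\n", q[i].seq, q[i].end_seq);  /* two segments remain */
	return 0;
}
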
+diff --git a/net/mptcp/mptcp_olia.c b/net/mptcp/mptcp_olia.c
+new file mode 100644
+index 0000000..43d821e
+--- /dev/null
++++ b/net/mptcp/mptcp_olia.c
+@@ -0,0 +1,314 @@
++/*
++ * MPTCP implementation - OPPORTUNISTIC LINKED INCREASES CONGESTION CONTROL:
++ *
++ * Algorithm design:
++ * Ramin Khalili <ramin.khalili@epfl.ch>
++ * Nicolas Gast <nicolas.gast@epfl.ch>
++ * Jean-Yves Le Boudec <jean-yves.leboudec@epfl.ch>
++ *
++ * Implementation:
++ * Ramin Khalili <ramin.khalili@epfl.ch>
++ *
++ * Ported to the official MPTCP-kernel:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++
++#include <net/tcp.h>
++#include <net/mptcp.h>
++
++#include <linux/module.h>
++
++static int scale = 10;
++
++struct mptcp_olia {
++ u32 mptcp_loss1;
++ u32 mptcp_loss2;
++ u32 mptcp_loss3;
++ int epsilon_num;
++ u32 epsilon_den;
++ int mptcp_snd_cwnd_cnt;
++};
++
++static inline int mptcp_olia_sk_can_send(const struct sock *sk)
++{
++ return mptcp_sk_can_send(sk) && tcp_sk(sk)->srtt;
++}
++
++static inline u64 mptcp_olia_scale(u64 val, int scale)
++{
++ return (u64) val << scale;
++}
++
++/* Take care of the artificial inflation of cwnd (see RFC 5681)
++ * during the fast-retransmit phase.
++ */
++static u32 mptcp_get_crt_cwnd(struct sock *sk)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++
++ if (icsk->icsk_ca_state == TCP_CA_Recovery)
++ return tcp_sk(sk)->snd_ssthresh;
++ else
++ return tcp_sk(sk)->snd_cwnd;
++}
++
++/* return the denominator of the first term of the increase term */
++static u64 mptcp_get_rate(struct mptcp_cb *mpcb , u32 path_rtt)
++{
++ struct sock *sk;
++ u64 rate = 1; /* We have to avoid a zero-rate because it is used as a divisor */
++
++ mptcp_for_each_sk(mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++ u64 scaled_num;
++ u32 tmp_cwnd;
++
++ if (!mptcp_olia_sk_can_send(sk))
++ continue;
++
++ tmp_cwnd = mptcp_get_crt_cwnd(sk);
++ scaled_num = mptcp_olia_scale(tmp_cwnd, scale) * path_rtt;
++ rate += div_u64(scaled_num , tp->srtt);
++ }
++ rate *= rate;
++ return rate;
++}
++
++/* find the maximum cwnd, used to find set M */
++static u32 mptcp_get_max_cwnd(struct mptcp_cb *mpcb)
++{
++ struct sock *sk;
++ u32 best_cwnd = 0;
++
++ mptcp_for_each_sk(mpcb, sk) {
++ u32 tmp_cwnd;
++
++ if (!mptcp_olia_sk_can_send(sk))
++ continue;
++
++ tmp_cwnd = mptcp_get_crt_cwnd(sk);
++ if (tmp_cwnd > best_cwnd)
++ best_cwnd = tmp_cwnd;
++ }
++ return best_cwnd;
++}
++
++static void mptcp_get_epsilon(struct mptcp_cb *mpcb)
++{
++ struct mptcp_olia *ca;
++ struct tcp_sock *tp;
++ struct sock *sk;
++ u64 tmp_int, tmp_rtt, best_int = 0, best_rtt = 1;
++ u32 max_cwnd = 1, best_cwnd = 1, tmp_cwnd;
++ u8 M = 0, B_not_M = 0;
++
++ /* TODO - integrate this in the following loop - we just want to iterate once */
++
++ max_cwnd = mptcp_get_max_cwnd(mpcb);
++
++ /* find the best path */
++ mptcp_for_each_sk(mpcb, sk) {
++ tp = tcp_sk(sk);
++ ca = inet_csk_ca(sk);
++
++ if (!mptcp_olia_sk_can_send(sk))
++ continue;
++
++ tmp_rtt = tp->srtt * tp->srtt;
++ /* TODO - check here and rename variables */
++ tmp_int = max(ca->mptcp_loss3 - ca->mptcp_loss2,
++ ca->mptcp_loss2 - ca->mptcp_loss1);
++
++ tmp_cwnd = mptcp_get_crt_cwnd(sk);
++ if (tmp_int * best_rtt >= best_int * tmp_rtt) {
++ best_rtt = tmp_rtt;
++ best_int = tmp_int;
++ best_cwnd = tmp_cwnd;
++ }
++ }
++
++ /* TODO - integrate this here in mptcp_get_max_cwnd and in the previous loop */
++ /* find the size of M and B_not_M */
++ mptcp_for_each_sk(mpcb, sk) {
++ tp = tcp_sk(sk);
++ ca = inet_csk_ca(sk);
++
++ if (!mptcp_olia_sk_can_send(sk))
++ continue;
++
++ tmp_cwnd = mptcp_get_crt_cwnd(sk);
++ if (tmp_cwnd == max_cwnd) {
++ M++;
++ } else {
++ tmp_rtt = tp->srtt * tp->srtt;
++ tmp_int = max(ca->mptcp_loss3 - ca->mptcp_loss2,
++ ca->mptcp_loss2 - ca->mptcp_loss1);
++
++ if (tmp_int * best_rtt == best_int * tmp_rtt)
++ B_not_M++;
++ }
++ }
++
++ /* check if the path is in M or B_not_M and set the value of epsilon accordingly */
++ mptcp_for_each_sk(mpcb, sk) {
++ tp = tcp_sk(sk);
++ ca = inet_csk_ca(sk);
++
++ if (!mptcp_olia_sk_can_send(sk))
++ continue;
++
++ if (B_not_M == 0) {
++ ca->epsilon_num = 0;
++ ca->epsilon_den = 1;
++ } else {
++ tmp_rtt = tp->srtt * tp->srtt;
++ tmp_int = max(ca->mptcp_loss3 - ca->mptcp_loss2,
++ ca->mptcp_loss2 - ca->mptcp_loss1);
++ tmp_cwnd = mptcp_get_crt_cwnd(sk);
++
++ if (tmp_cwnd < max_cwnd &&
++			    tmp_int * best_rtt == best_int * tmp_rtt) {
++ ca->epsilon_num = 1;
++ ca->epsilon_den = mpcb->cnt_established * B_not_M;
++ } else if (tmp_cwnd == max_cwnd) {
++ ca->epsilon_num = -1;
++ ca->epsilon_den = mpcb->cnt_established * M;
++ } else {
++ ca->epsilon_num = 0;
++ ca->epsilon_den = 1;
++ }
++ }
++ }
++
++}
++
++/* setting the initial values */
++static void mptcp_olia_init(struct sock *sk)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_olia *ca = inet_csk_ca(sk);
++
++ if (tp->mpc) {
++ ca->mptcp_loss1 = tp->snd_una;
++ ca->mptcp_loss2 = tp->snd_una;
++ ca->mptcp_loss3 = tp->snd_una;
++ ca->mptcp_snd_cwnd_cnt = 0;
++ ca->epsilon_num = 0;
++ ca->epsilon_den = 1;
++ }
++}
++
++/* updating inter-loss distance and ssthresh */
++static void mptcp_olia_set_state(struct sock *sk, u8 new_state)
++{
++ if (!tcp_sk(sk)->mpc)
++ return;
++
++ if (new_state == TCP_CA_Loss ||
++ new_state == TCP_CA_Recovery || new_state == TCP_CA_CWR) {
++ struct mptcp_olia *ca = inet_csk_ca(sk);
++
++ if (ca->mptcp_loss3 != ca->mptcp_loss2 &&
++ !inet_csk(sk)->icsk_retransmits) {
++ ca->mptcp_loss1 = ca->mptcp_loss2;
++ ca->mptcp_loss2 = ca->mptcp_loss3;
++ }
++ }
++
++}
++
++/* main algorithm */
++static void mptcp_olia_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_olia *ca = inet_csk_ca(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++
++ u64 inc_num, inc_den, rate, cwnd_scaled;
++
++ if (!tp->mpc) {
++ tcp_reno_cong_avoid(sk, ack, acked, in_flight);
++ return;
++ }
++
++ ca->mptcp_loss3 = tp->snd_una;
++
++ if (!tcp_is_cwnd_limited(sk, in_flight))
++ return;
++
++ /* slow start if it is in the safe area */
++ if (tp->snd_cwnd <= tp->snd_ssthresh) {
++ tcp_slow_start(tp, acked);
++ return;
++ }
++
++ mptcp_get_epsilon(mpcb);
++ rate = mptcp_get_rate(mpcb, tp->srtt);
++ cwnd_scaled = mptcp_olia_scale(tp->snd_cwnd, scale);
++ inc_den = ca->epsilon_den * tp->snd_cwnd * rate ? : 1;
++
++	/* calculate the increase term; scaling is used to reduce the rounding effect */
++ if (ca->epsilon_num == -1) {
++ if (ca->epsilon_den * cwnd_scaled * cwnd_scaled < rate) {
++ inc_num = rate - ca->epsilon_den *
++ cwnd_scaled * cwnd_scaled;
++ ca->mptcp_snd_cwnd_cnt -= div64_u64(
++ mptcp_olia_scale(inc_num , scale) , inc_den);
++ } else {
++ inc_num = ca->epsilon_den *
++ cwnd_scaled * cwnd_scaled - rate;
++ ca->mptcp_snd_cwnd_cnt += div64_u64(
++ mptcp_olia_scale(inc_num , scale) , inc_den);
++ }
++ } else {
++ inc_num = ca->epsilon_num * rate +
++ ca->epsilon_den * cwnd_scaled * cwnd_scaled;
++ ca->mptcp_snd_cwnd_cnt += div64_u64(
++ mptcp_olia_scale(inc_num , scale) , inc_den);
++ }
++
++
++ if (ca->mptcp_snd_cwnd_cnt >= (1 << scale) - 1) {
++ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
++ tp->snd_cwnd++;
++ ca->mptcp_snd_cwnd_cnt = 0;
++ } else if (ca->mptcp_snd_cwnd_cnt <= 0 - (1 << scale) + 1) {
++ tp->snd_cwnd = max((int) 1 , (int) tp->snd_cwnd - 1);
++ ca->mptcp_snd_cwnd_cnt = 0;
++ }
++}
++
++static struct tcp_congestion_ops mptcp_olia = {
++ .init = mptcp_olia_init,
++ .ssthresh = tcp_reno_ssthresh,
++ .cong_avoid = mptcp_olia_cong_avoid,
++ .set_state = mptcp_olia_set_state,
++ .min_cwnd = tcp_reno_min_cwnd,
++ .owner = THIS_MODULE,
++ .name = "olia",
++};
++
++static int __init mptcp_olia_register(void)
++{
++ BUILD_BUG_ON(sizeof(struct mptcp_olia) > ICSK_CA_PRIV_SIZE);
++ return tcp_register_congestion_control(&mptcp_olia);
++}
++
++static void __exit mptcp_olia_unregister(void)
++{
++ tcp_unregister_congestion_control(&mptcp_olia);
++}
++
++module_init(mptcp_olia_register);
++module_exit(mptcp_olia_unregister);
++
++MODULE_AUTHOR("Ramin Khalili, Nicolas Gast, Jean-Yves Le Boudec");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MPTCP COUPLED CONGESTION CONTROL");
++MODULE_VERSION("0.1");
+diff --git a/net/mptcp/mptcp_output.c b/net/mptcp/mptcp_output.c
+new file mode 100644
+index 0000000..807b79e
+--- /dev/null
++++ b/net/mptcp/mptcp_output.c
+@@ -0,0 +1,2255 @@
++/*
++ * MPTCP implementation - Sending side
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/kconfig.h>
++#include <linux/skbuff.h>
++#include <linux/tcp.h>
++
++#include <net/mptcp.h>
++#include <net/mptcp_v4.h>
++#include <net/mptcp_v6.h>
++#include <net/sock.h>
++
++static inline int mptcp_pi_to_flag(int pi)
++{
++ return 1 << (pi - 1);
++}
++
++static inline int mptcp_sub_len_remove_addr(u16 bitfield)
++{
++ unsigned int c;
++ for (c = 0; bitfield; c++)
++ bitfield &= bitfield - 1;
++ return MPTCP_SUB_LEN_REMOVE_ADDR + c - 1;
++}
++
++int mptcp_sub_len_remove_addr_align(u16 bitfield)
++{
++ return ALIGN(mptcp_sub_len_remove_addr(bitfield), 4);
++}
++EXPORT_SYMBOL(mptcp_sub_len_remove_addr_align);
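
mptcp_sub_len_remove_addr() sizes the REMOVE_ADDR option from the number of address ids being removed; the loop above is the classic Kernighan bit-count, clearing one set bit per iteration. A stand-alone illustration (not part of the patch):

#include <stdio.h>

/* Kernighan's trick: each iteration clears the lowest set bit. */
static unsigned int popcount16(unsigned short bitfield)
{
	unsigned int c;

	for (c = 0; bitfield; c++)
		bitfield &= bitfield - 1;
	return c;
}

int main(void)
{
	printf("%u\n", popcount16(0x0016));	/* bits 1, 2 and 4 set -> prints 3 */
	return 0;
}
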
++
++/* Is the sub-socket sk available to send the skb? */
++static int mptcp_is_available(struct sock *sk, struct sk_buff *skb,
++ unsigned int *mss)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ unsigned int mss_now;
++
++ /* Set of states for which we are allowed to send data */
++ if (!mptcp_sk_can_send(sk))
++ return 0;
++
++ /* We do not send data on this subflow unless it is
++ * fully established, i.e. the 4th ack has been received.
++ */
++ if (tp->mptcp->pre_established)
++ return 0;
++
++ if (tp->pf ||
++ (tp->mpcb->noneligible & mptcp_pi_to_flag(tp->mptcp->path_index)))
++ return 0;
++
++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) {
++ /* If SACK is disabled, and we got a loss, TCP does not exit
++ * the loss-state until something above high_seq has been acked.
++ * (see tcp_try_undo_recovery)
++ *
++ * high_seq is the snd_nxt at the moment of the RTO. As soon
++ * as we have an RTO, we won't push data on the subflow.
++ * Thus, snd_una can never go beyond high_seq.
++ */
++ if (!tcp_is_reno(tp))
++ return 0;
++ else if (tp->snd_una != tp->high_seq)
++ return 0;
++ }
++
++ if (!tp->mptcp->fully_established) {
++ /* Make sure that we send in-order data */
++ if (skb && tp->mptcp->second_packet &&
++ tp->mptcp->last_end_data_seq != TCP_SKB_CB(skb)->seq)
++ return 0;
++ }
++
++ if (!tcp_cwnd_test(tp, skb))
++ return 0;
++
++ mss_now = tcp_current_mss(sk);
++	/* Don't send on this subflow if we bypass the allowed send-window at
++	 * the per-subflow level. Similar to tcp_snd_wnd_test, but with a manually
++	 * calculated end_seq (because at this point end_seq is still at
++	 * the meta-level).
++	 */
++ if (skb && after(tp->write_seq + min(skb->len, mss_now), tcp_wnd_end(tp)))
++ return 0;
++
++ if (mss)
++ *mss = mss_now;
++
++ return 1;
++}
++
++/* Are we not allowed to reinject this skb on tp? */
++static int mptcp_dont_reinject_skb(struct tcp_sock *tp, struct sk_buff *skb)
++{
++ /* If the skb has already been enqueued in this sk, try to find
++ * another one.
++ */
++ return skb &&
++ /* Has the skb already been enqueued into this subsocket? */
++ mptcp_pi_to_flag(tp->mptcp->path_index) & TCP_SKB_CB(skb)->path_mask;
++}
++
++/* This is the scheduler. This function decides on which flow to send
++ * a given MSS. If all subflows are found to be busy, NULL is returned
++ * The flow is selected based on the shortest RTT.
++ * If all paths have full cong windows, we simply return NULL.
++ *
++ * Additionally, this function is aware of the backup-subflows.
++ */
++static struct sock *get_available_subflow(struct sock *meta_sk,
++ struct sk_buff *skb,
++ unsigned int *mss_now)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct sock *sk, *bestsk = NULL, *lowpriosk = NULL, *backupsk = NULL;
++ unsigned int mss = 0, mss_lowprio = 0, mss_backup = 0;
++ u32 min_time_to_peer = 0xffffffff, lowprio_min_time_to_peer = 0xffffffff;
++ int cnt_backups = 0;
++
++ /* if there is only one subflow, bypass the scheduling function */
++ if (mpcb->cnt_subflows == 1) {
++ bestsk = (struct sock *)mpcb->connection_list;
++ if (!mptcp_is_available(bestsk, skb, mss_now))
++ bestsk = NULL;
++ return bestsk;
++ }
++
++ /* Answer data_fin on same subflow!!! */
++ if (meta_sk->sk_shutdown & RCV_SHUTDOWN &&
++ skb && mptcp_is_data_fin(skb)) {
++ mptcp_for_each_sk(mpcb, sk) {
++ if (tcp_sk(sk)->mptcp->path_index == mpcb->dfin_path_index &&
++ mptcp_is_available(sk, skb, mss_now))
++ return sk;
++ }
++ }
++
++ /* First, find the best subflow */
++ mptcp_for_each_sk(mpcb, sk) {
++ struct tcp_sock *tp = tcp_sk(sk);
++ int this_mss;
++
++ if (tp->mptcp->rcv_low_prio || tp->mptcp->low_prio)
++ cnt_backups++;
++
++ if ((tp->mptcp->rcv_low_prio || tp->mptcp->low_prio) &&
++ tp->srtt < lowprio_min_time_to_peer) {
++
++ if (!mptcp_is_available(sk, skb, &this_mss))
++ continue;
++
++ if (mptcp_dont_reinject_skb(tp, skb)) {
++ mss_backup = this_mss;
++ backupsk = sk;
++ continue;
++ }
++
++ lowprio_min_time_to_peer = tp->srtt;
++ lowpriosk = sk;
++ mss_lowprio = this_mss;
++ } else if (!(tp->mptcp->rcv_low_prio || tp->mptcp->low_prio) &&
++ tp->srtt < min_time_to_peer) {
++ if (!mptcp_is_available(sk, skb, &this_mss))
++ continue;
++
++ if (mptcp_dont_reinject_skb(tp, skb)) {
++ mss_backup = this_mss;
++ backupsk = sk;
++ continue;
++ }
++
++ min_time_to_peer = tp->srtt;
++ bestsk = sk;
++ mss = this_mss;
++ }
++ }
++
++ if (mpcb->cnt_established == cnt_backups && lowpriosk) {
++ mss = mss_lowprio;
++ sk = lowpriosk;
++ } else if (bestsk) {
++ sk = bestsk;
++	} else if (backupsk) {
++ /* It has been sent on all subflows once - let's give it a
++ * chance again by restarting its pathmask.
++ */
++ if (skb)
++ TCP_SKB_CB(skb)->path_mask = 0;
++ mss = mss_backup;
++ sk = backupsk;
++ }
++
++ if (mss_now)
++ *mss_now = mss;
++
++ return sk;
++}
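
Stripped of the reinjection bookkeeping, the scheduler above is a minimum-srtt selection over the subflows that pass mptcp_is_available(): regular paths win over backup paths, backup paths are used only when every established subflow is a backup, and a subflow that already carried the skb is kept as a last resort with its path mask reset. The reduced sketch below shows just the core selection; the struct, fields and sample values are illustrative, not taken from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct subflow {
	uint32_t srtt;		/* smoothed RTT estimate */
	int	available;	/* cwnd/window checks passed */
	int	backup;		/* low-priority path */
};

/* Pick the lowest-srtt available subflow, preferring non-backup paths. */
static struct subflow *pick_subflow(struct subflow *sf, size_t n)
{
	struct subflow *best = NULL, *best_backup = NULL;

	for (size_t i = 0; i < n; i++) {
		if (!sf[i].available)
			continue;
		if (sf[i].backup) {
			if (!best_backup || sf[i].srtt < best_backup->srtt)
				best_backup = &sf[i];
		} else {
			if (!best || sf[i].srtt < best->srtt)
				best = &sf[i];
		}
	}
	return best ? best : best_backup;
}

int main(void)
{
	struct subflow sf[] = {
		{ .srtt = 40, .available = 1, .backup = 0 },
		{ .srtt = 10, .available = 1, .backup = 1 },	/* fastest, but backup */
		{ .srtt = 25, .available = 1, .backup = 0 },
	};
	struct subflow *s = pick_subflow(sf, 3);

	printf("chose subflow with srtt=%u backup=%d\n", s->srtt, s->backup);
	return 0;
}
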
++
++static struct mp_dss *mptcp_skb_find_dss(const struct sk_buff *skb)
++{
++ if (!mptcp_is_data_seq(skb))
++ return NULL;
++
++ return (struct mp_dss *)(skb->data - (MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN));
++}
++
++/* get the data-seq and end-data-seq and store them again in the
++ * tcp_skb_cb
++ */
++static int mptcp_reconstruct_mapping(struct sk_buff *skb, struct sk_buff *orig_skb)
++{
++ struct mp_dss *mpdss = mptcp_skb_find_dss(orig_skb);
++ u32 *p32;
++ u16 *p16;
++
++ if (!mpdss || !mpdss->M)
++ return 1;
++
++ /* Move the pointer to the data-seq */
++ p32 = (u32 *)mpdss;
++ p32++;
++ if (mpdss->A) {
++ p32++;
++ if (mpdss->a)
++ p32++;
++ }
++
++ TCP_SKB_CB(skb)->seq = ntohl(*p32);
++
++ /* Get the data_len to calculate the end_data_seq */
++ p32++;
++ p32++;
++ p16 = (u16 *)p32;
++ TCP_SKB_CB(skb)->end_seq = ntohs(*p16) + TCP_SKB_CB(skb)->seq;
++
++ return 0;
++}
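
The walk above steps through the DSS option by hand: past the 4-byte kind/length/flags header, past the data ACK when the A flag is set (8 bytes instead of 4 when a is also set), onto the 4-byte data sequence number, then over the subflow sequence number to the 16-bit data-level length. For orientation, the mapping part of a DSS option with 4-byte fields can be pictured as the packed struct below; this mirrors the RFC 6824 layout and is only an illustration, not a type used by the patch.

#include <stdint.h>
#include <stdio.h>

/* DSS mapping with a 4-byte data sequence number (flag M set, m clear),
 * as laid out on the wire after the kind/length/flags header and any data ACK.
 */
struct dss_map32 {
	uint32_t data_seq;	/* data sequence number (meta-level) */
	uint32_t subflow_seq;	/* relative subflow sequence number */
	uint16_t data_len;	/* data-level length of the mapping */
	uint16_t checksum;	/* DSS checksum, if negotiated */
} __attribute__((packed));

int main(void)
{
	printf("mapping block: %zu bytes\n", sizeof(struct dss_map32));	/* 12 */
	return 0;
}
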
++
++/* Similar to __pskb_copy and sk_stream_alloc_skb. */
++static struct sk_buff *mptcp_pskb_copy(struct sk_buff *skb)
++{
++ struct sk_buff *n;
++ /* The TCP header must be at least 32-bit aligned. */
++ int size = ALIGN(skb_headlen(skb), 4);
++
++ n = alloc_skb_fclone(size + MAX_TCP_HEADER, GFP_ATOMIC);
++ if (!n)
++ return NULL;
++
++ /* Set the data pointer */
++ skb_reserve(n, MAX_TCP_HEADER);
++ /* Set the tail pointer and length */
++ skb_put(n, skb_headlen(skb));
++ /* Copy the bytes */
++ skb_copy_from_linear_data(skb, n->data, n->len);
++
++ n->truesize += skb->data_len;
++ n->data_len = skb->data_len;
++ n->len = skb->len;
++
++ if (skb_shinfo(skb)->nr_frags) {
++ int i;
++
++ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
++ if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
++ kfree_skb(n);
++ n = NULL;
++ goto out;
++ }
++ }
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
++ skb_frag_ref(skb, i);
++ }
++ skb_shinfo(n)->nr_frags = i;
++ }
++
++ if (skb_has_frag_list(skb)) {
++ skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
++ skb_clone_fraglist(n);
++ }
++
++ copy_skb_header(n, skb);
++out:
++ return n;
++}
++
++/* Reinject data from one TCP subflow to the meta_sk. If sk == NULL, we are
++ * coming from the meta-retransmit-timer
++ */
++static void __mptcp_reinject_data(struct sk_buff *orig_skb, struct sock *meta_sk,
++ struct sock *sk, int clone_it)
++{
++ struct sk_buff *skb, *skb1;
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ u32 seq, end_seq;
++
++ if (clone_it) {
++ /* pskb_copy is necessary here, because the TCP/IP-headers
++ * will be changed when it's going to be reinjected on another
++ * subflow.
++ */
++ skb = mptcp_pskb_copy(orig_skb);
++ } else {
++ __skb_unlink(orig_skb, &sk->sk_write_queue);
++ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
++ sk->sk_wmem_queued -= orig_skb->truesize;
++ sk_mem_uncharge(sk, orig_skb->truesize);
++ skb = orig_skb;
++ }
++ if (unlikely(!skb))
++ return;
++
++ if (sk && mptcp_reconstruct_mapping(skb, orig_skb)) {
++ __kfree_skb(skb);
++ return;
++ }
++
++ skb->sk = meta_sk;
++
++	/* If it has already reached the destination, we don't have to reinject it */
++ if (!after(TCP_SKB_CB(skb)->end_seq, meta_tp->snd_una)) {
++ __kfree_skb(skb);
++ return;
++ }
++
++ /* Only reinject segments that are fully covered by the mapping */
++ if (skb->len + (mptcp_is_data_fin(skb) ? 1 : 0) !=
++ TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq) {
++ u32 seq = TCP_SKB_CB(skb)->seq;
++ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
++
++ __kfree_skb(skb);
++
++ /* Ok, now we have to look for the full mapping in the meta
++ * send-queue :S
++ */
++ tcp_for_write_queue(skb, meta_sk) {
++ /* Not yet at the mapping? */
++ if (before(TCP_SKB_CB(skb)->seq, seq))
++ continue;
++ /* We have passed by the mapping */
++ if (after(TCP_SKB_CB(skb)->end_seq, end_seq))
++ return;
++
++ __mptcp_reinject_data(skb, meta_sk, NULL, 1);
++ }
++ return;
++ }
++
++ /* If it's empty, just add */
++ if (skb_queue_empty(&mpcb->reinject_queue)) {
++ skb_queue_head(&mpcb->reinject_queue, skb);
++ return;
++ }
++
++ /* Find place to insert skb - or even we can 'drop' it, as the
++ * data is already covered by other skb's in the reinject-queue.
++ *
++ * This is inspired by code from tcp_data_queue.
++ */
++
++ skb1 = skb_peek_tail(&mpcb->reinject_queue);
++ seq = TCP_SKB_CB(skb)->seq;
++ while (1) {
++ if (!after(TCP_SKB_CB(skb1)->seq, seq))
++ break;
++ if (skb_queue_is_first(&mpcb->reinject_queue, skb1)) {
++ skb1 = NULL;
++ break;
++ }
++ skb1 = skb_queue_prev(&mpcb->reinject_queue, skb1);
++ }
++
++	/* Does the skb overlap with the previous one? */
++ end_seq = TCP_SKB_CB(skb)->end_seq;
++ if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
++ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
++ /* All the bits are present. Don't reinject */
++ __kfree_skb(skb);
++ return;
++ }
++ if (seq == TCP_SKB_CB(skb1)->seq) {
++ if (skb_queue_is_first(&mpcb->reinject_queue, skb1))
++ skb1 = NULL;
++ else
++ skb1 = skb_queue_prev(&mpcb->reinject_queue, skb1);
++ }
++ }
++ if (!skb1)
++ __skb_queue_head(&mpcb->reinject_queue, skb);
++ else
++ __skb_queue_after(&mpcb->reinject_queue, skb1, skb);
++
++ /* And clean segments covered by new one as whole. */
++ while (!skb_queue_is_last(&mpcb->reinject_queue, skb)) {
++ skb1 = skb_queue_next(&mpcb->reinject_queue, skb);
++
++ if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
++ break;
++
++ __skb_unlink(skb1, &mpcb->reinject_queue);
++ __kfree_skb(skb1);
++ }
++ return;
++}
++
++/* Inserts data into the reinject queue */
++void mptcp_reinject_data(struct sock *sk, int clone_it)
++{
++ struct sk_buff *skb_it, *tmp;
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = tp->meta_sk;
++
++ /* It has already been closed - there is really no point in reinjecting */
++ if (meta_sk->sk_state == TCP_CLOSE)
++ return;
++
++ skb_queue_walk_safe(&sk->sk_write_queue, skb_it, tmp) {
++ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb_it);
++ /* Subflow syn's and fin's are not reinjected.
++ *
++		 * The same holds for empty subflow-fins carrying a data-fin;
++		 * they are reinjected below (without the subflow-fin-flag).
++ */
++ if (tcb->tcp_flags & TCPHDR_SYN ||
++ (tcb->tcp_flags & TCPHDR_FIN && !mptcp_is_data_fin(skb_it)) ||
++ (tcb->tcp_flags & TCPHDR_FIN && mptcp_is_data_fin(skb_it) && !skb_it->len))
++ continue;
++
++ __mptcp_reinject_data(skb_it, meta_sk, sk, clone_it);
++ }
++
++ skb_it = tcp_write_queue_tail(meta_sk);
++ /* If sk has sent the empty data-fin, we have to reinject it too. */
++ if (skb_it && mptcp_is_data_fin(skb_it) && skb_it->len == 0 &&
++ TCP_SKB_CB(skb_it)->path_mask & mptcp_pi_to_flag(tp->mptcp->path_index)) {
++ __mptcp_reinject_data(skb_it, meta_sk, NULL, 1);
++ }
++
++ mptcp_push_pending_frames(meta_sk);
++
++ tp->pf = 1;
++}
++EXPORT_SYMBOL(mptcp_reinject_data);
++
++static void mptcp_combine_dfin(struct sk_buff *skb, struct sock *meta_sk,
++ struct sock *subsk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sock *sk_it;
++ int all_empty = 1, all_acked;
++
++ /* In infinite mapping we always try to combine */
++ if (mpcb->infinite_mapping_snd && tcp_close_state(subsk)) {
++ subsk->sk_shutdown |= SEND_SHUTDOWN;
++ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
++ return;
++ }
++
++	/* Don't combine if they didn't combine - otherwise we end up in
++ * TIME_WAIT, even if our app is smart enough to avoid it
++ */
++ if (meta_sk->sk_shutdown & RCV_SHUTDOWN) {
++ if (!mpcb->dfin_combined)
++ return;
++ }
++
++ /* If no other subflow has data to send, we can combine */
++ mptcp_for_each_sk(mpcb, sk_it) {
++ if (!mptcp_sk_can_send(sk_it))
++ continue;
++
++ if (!tcp_write_queue_empty(sk_it))
++ all_empty = 0;
++ }
++
++ /* If all data has been DATA_ACKed, we can combine.
++ * -1, because the data_fin consumed one byte
++ */
++ all_acked = (meta_tp->snd_una == (meta_tp->write_seq - 1));
++
++ if ((all_empty || all_acked) && tcp_close_state(subsk)) {
++ subsk->sk_shutdown |= SEND_SHUTDOWN;
++ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
++ }
++}
++
++static struct sk_buff *mptcp_skb_entail(struct sock *sk, struct sk_buff *skb,
++ int reinject)
++{
++ __be32 *ptr;
++ __u16 data_len;
++ struct mp_dss *mdss;
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct tcp_skb_cb *tcb;
++ struct sk_buff *subskb = NULL;
++
++ if (!reinject)
++ TCP_SKB_CB(skb)->mptcp_flags |= (mpcb->snd_hiseq_index ?
++ MPTCPHDR_SEQ64_INDEX : 0);
++
++ subskb = mptcp_pskb_copy(skb);
++ if (!subskb)
++ return NULL;
++
++ TCP_SKB_CB(skb)->path_mask |= mptcp_pi_to_flag(tp->mptcp->path_index);
++
++ if (!(sk->sk_route_caps & NETIF_F_ALL_CSUM) &&
++ skb->ip_summed == CHECKSUM_PARTIAL) {
++ subskb->csum = skb->csum = skb_checksum(skb, 0, skb->len, 0);
++ subskb->ip_summed = skb->ip_summed = CHECKSUM_NONE;
++ }
++
++ /* The subskb is going in the subflow send-queue. Its path-mask
++ * is not needed anymore and MUST be set to 0, as the path-mask
++ * is a union with inet_skb_param.
++ */
++ tcb = TCP_SKB_CB(subskb);
++ tcb->path_mask = 0;
++
++ if (mptcp_is_data_fin(subskb))
++ mptcp_combine_dfin(subskb, meta_sk, sk);
++
++ if (tp->mpcb->infinite_mapping_snd)
++ goto no_data_seq;
++
++ if (tp->mpcb->send_infinite_mapping &&
++ !before(tcb->seq, mptcp_meta_tp(tp)->snd_nxt)) {
++ tp->mptcp->fully_established = 1;
++ tp->mpcb->infinite_mapping_snd = 1;
++ tp->mptcp->infinite_cutoff_seq = tp->write_seq;
++ tcb->mptcp_flags |= MPTCPHDR_INF;
++ data_len = 0;
++ } else {
++ data_len = tcb->end_seq - tcb->seq;
++ }
++
++ /**** Write MPTCP DSS-option to the packet. ****/
++ ptr = (__be32 *)(subskb->data - (MPTCP_SUB_LEN_DSS_ALIGN +
++ MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN));
++
++ /* Then we start writing it from the start */
++ mdss = (struct mp_dss *)ptr;
++
++ mdss->kind = TCPOPT_MPTCP;
++ mdss->sub = MPTCP_SUB_DSS;
++ mdss->rsv1 = 0;
++ mdss->rsv2 = 0;
++ mdss->F = (mptcp_is_data_fin(subskb) ? 1 : 0);
++ mdss->m = 0;
++ mdss->M = 1;
++ mdss->a = 0;
++ mdss->A = 1;
++ mdss->len = mptcp_sub_len_dss(mdss, tp->mpcb->dss_csum);
++
++ ptr++;
++ ptr++; /* data_ack will be set in mptcp_options_write */
++ *ptr++ = htonl(tcb->seq); /* data_seq */
++
++ /* If it's a non-data DATA_FIN, we set subseq to 0 (draft v7) */
++ if (mptcp_is_data_fin(subskb) && subskb->len == 0)
++ *ptr++ = 0; /* subseq */
++ else
++ *ptr++ = htonl(tp->write_seq - tp->mptcp->snt_isn); /* subseq */
++
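++	/* With DSS checksums enabled, the data-level checksum covers the DSS
++	 * pseudo-header (64-bit data sequence number, subflow sequence
++	 * number, data-level length, zeroed checksum field) plus the payload,
++	 * and is stored right behind the data-level length.
++	 */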
++ if (tp->mpcb->dss_csum && data_len) {
++ __be16 *p16 = (__be16 *)ptr;
++ __be32 hdseq = mptcp_get_highorder_sndbits(subskb, tp->mpcb);
++ __wsum csum;
++ *ptr = htonl(((data_len) << 16) |
++ (TCPOPT_EOL << 8) |
++ (TCPOPT_EOL));
++
++ csum = csum_partial(ptr - 2, 12, subskb->csum);
++ p16++;
++ *p16++ = csum_fold(csum_partial(&hdseq, sizeof(hdseq), csum));
++ } else {
++ *ptr++ = htonl(((data_len) << 16) |
++ (TCPOPT_NOP << 8) |
++ (TCPOPT_NOP));
++ }
++
++no_data_seq:
++ tcb->seq = tp->write_seq;
++ tcb->sacked = 0; /* reset the sacked field: from the point of view
++ * of this subflow, we are sending a brand new
++ * segment */
++ /* Take into account seg len */
++ tp->write_seq += subskb->len + ((tcb->tcp_flags & TCPHDR_FIN) ? 1 : 0);
++ tcb->end_seq = tp->write_seq;
++
++ /* If it's a non-payload DATA_FIN (also no subflow-fin), the
++ * segment is not part of the subflow but on a meta-only-level
++ */
++ if (!mptcp_is_data_fin(subskb) || tcb->end_seq != tcb->seq) {
++ tcp_add_write_queue_tail(sk, subskb);
++ sk->sk_wmem_queued += subskb->truesize;
++ sk_mem_charge(sk, subskb->truesize);
++ }
++
++ return subskb;
++}
++
++static void mptcp_sub_event_new_data_sent(struct sock *sk,
++ struct sk_buff *subskb,
++ struct sk_buff *skb)
++{
++ /* If it's a non-payload DATA_FIN (also no subflow-fin), the
++ * segment is not part of the subflow but on a meta-only-level
++ *
++ * We free it, because it has been queued nowhere.
++ */
++ if (!mptcp_is_data_fin(subskb) ||
++ (TCP_SKB_CB(subskb)->end_seq != TCP_SKB_CB(subskb)->seq)) {
++ tcp_event_new_data_sent(sk, subskb);
++ tcp_sk(sk)->mptcp->second_packet = 1;
++ tcp_sk(sk)->mptcp->last_end_data_seq = TCP_SKB_CB(skb)->end_seq;
++ } else {
++ kfree_skb(subskb);
++ }
++}
++
++/* Handle the packets and sockets after a tcp_transmit_skb failed */
++static void mptcp_transmit_skb_failed(struct sock *sk, struct sk_buff *skb,
++ struct sk_buff *subskb)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct mptcp_cb *mpcb = tp->mpcb;
++
++	/* No work to do if we are in infinite mapping mode.
++ * There is only one subflow left and we cannot send this segment on
++ * another subflow.
++ */
++ if (mpcb->infinite_mapping_snd)
++ return;
++
++ TCP_SKB_CB(skb)->path_mask &= ~mptcp_pi_to_flag(tp->mptcp->path_index);
++
++ if (TCP_SKB_CB(subskb)->tcp_flags & TCPHDR_FIN) {
++ /* If it is a subflow-fin we must leave it on the
++ * subflow-send-queue, so that the probe-timer
++ * can retransmit it.
++ */
++ if (!tp->packets_out && !inet_csk(sk)->icsk_pending)
++ inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
++ inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
++ } else if (mptcp_is_data_fin(subskb) &&
++ TCP_SKB_CB(subskb)->end_seq == TCP_SKB_CB(subskb)->seq) {
++ /* An empty data-fin has not been enqueued on the subflow
++ * and thus we free it.
++ */
++
++ kfree_skb(subskb);
++ } else {
++ /* In all other cases we remove it from the sub-queue.
++ * Other subflows may send it, or the probe-timer will
++ * handle it.
++ */
++ tcp_advance_send_head(sk, subskb);
++
++ /* tcp_add_write_queue_tail initialized highest_sack. We have
++ * to reset it, if necessary.
++ */
++ if (tp->highest_sack == subskb)
++ tp->highest_sack = NULL;
++
++ tcp_unlink_write_queue(subskb, sk);
++ tp->write_seq -= subskb->len;
++ sk_wmem_free_skb(sk, subskb);
++ }
++}
++
++/* Function to create two new TCP segments. Shrinks the given segment
++ * to the specified size and appends a new segment with the rest of the
++ * packet to the list. This won't be called frequently, I hope.
++ * Remember, these are still headerless SKBs at this point.
++ */
++int mptcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
++ unsigned int mss_now, int reinject)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct sk_buff *buff;
++ int nsize, old_factor;
++ int nlen;
++ u8 flags;
++ int dsslen = MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN;
++ char dss[MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN];
++
++ if (WARN_ON(len > skb->len))
++ return -EINVAL;
++
++ /* DSS-option must be recovered afterwards. */
++ if (!is_meta_sk(sk))
++ memcpy(dss, skb->data - dsslen, dsslen);
++
++ nsize = skb_headlen(skb) - len;
++ if (nsize < 0)
++ nsize = 0;
++
++ if (skb_cloned(skb)) {
++ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++ return -ENOMEM;
++ /* Recover dss-option */
++ if (!is_meta_sk(sk))
++ memcpy(skb->data - dsslen, dss, dsslen);
++ }
++
++ /* Get a new skb... force flag on. */
++ buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
++ if (buff == NULL)
++ return -ENOMEM; /* We'll just try again later. */
++
++ /* See below - if reinject == 1, the buff will be added to the reinject-
++ * queue, which is currently not part of the memory-accounting.
++ */
++ if (reinject != 1) {
++ sk->sk_wmem_queued += buff->truesize;
++ sk_mem_charge(sk, buff->truesize);
++ }
++ nlen = skb->len - len - nsize;
++ buff->truesize += nlen;
++ skb->truesize -= nlen;
++
++ /* Correct the sequence numbers. */
++ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
++ TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
++ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
++
++ /* PSH and FIN should only be set in the second packet. */
++ flags = TCP_SKB_CB(skb)->tcp_flags;
++ TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
++ TCP_SKB_CB(buff)->tcp_flags = flags;
++ TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
++
++ flags = TCP_SKB_CB(skb)->mptcp_flags;
++ TCP_SKB_CB(skb)->mptcp_flags = flags & ~(MPTCPHDR_FIN);
++ TCP_SKB_CB(buff)->mptcp_flags = flags;
++
++ if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
++ /* Copy and checksum data tail into the new buffer. */
++ buff->csum = csum_partial_copy_nocheck(skb->data + len,
++ skb_put(buff, nsize),
++ nsize, 0);
++
++ skb_trim(skb, len);
++
++ skb->csum = csum_block_sub(skb->csum, buff->csum, len);
++ } else {
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ skb_split(skb, buff, len);
++ }
++
++ /* We lost the dss-option when creating buff - put it back! */
++ if (!is_meta_sk(sk))
++ memcpy(buff->data - dsslen, dss, dsslen);
++
++ buff->ip_summed = skb->ip_summed;
++
++	/* Looks stupid, but our code really uses the 'when' field of
++	 * skbs which it has never sent before. --ANK
++ */
++ TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
++ buff->tstamp = skb->tstamp;
++
++ old_factor = tcp_skb_pcount(skb);
++
++ /* Fix up tso_factor for both original and new SKB. */
++ tcp_set_skb_tso_segs(sk, skb, mss_now);
++ tcp_set_skb_tso_segs(sk, buff, mss_now);
++
++ /* If this packet has been sent out already, we must
++ * adjust the various packet counters.
++ */
++ if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq) && reinject != 1) {
++ int diff = old_factor - tcp_skb_pcount(skb) -
++ tcp_skb_pcount(buff);
++
++ if (diff)
++ tcp_adjust_pcount(sk, skb, diff);
++ }
++
++ /* Link BUFF into the send queue. */
++ skb_header_release(buff);
++ if (reinject == 1)
++ __skb_queue_after(&tcp_sk(sk)->mpcb->reinject_queue, skb, buff);
++ else
++ tcp_insert_write_queue_after(skb, buff, sk);
++
++ return 0;
++}
++
++int mptso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
++ unsigned int mss_now, gfp_t gfp, int reinject)
++{
++ struct sk_buff *buff;
++ int nlen = skb->len - len, old_factor;
++ u8 flags;
++ int dsslen = MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN;
++
++ /* All of a TSO frame must be composed of paged data. */
++ if (skb->len != skb->data_len)
++ return mptcp_fragment(sk, skb, len, mss_now, reinject);
++
++ buff = sk_stream_alloc_skb(sk, 0, gfp);
++ if (unlikely(buff == NULL))
++ return -ENOMEM;
++
++ /* See below - if reinject == 1, the buff will be added to the reinject-
++ * queue, which is currently not part of the memory-accounting.
++ */
++ if (reinject != 1) {
++ sk->sk_wmem_queued += buff->truesize;
++ sk_mem_charge(sk, buff->truesize);
++ }
++ buff->truesize += nlen;
++ skb->truesize -= nlen;
++
++ /* Correct the sequence numbers. */
++ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
++ TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
++ TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
++
++ /* PSH and FIN should only be set in the second packet. */
++ flags = TCP_SKB_CB(skb)->tcp_flags;
++ TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
++ TCP_SKB_CB(buff)->tcp_flags = flags;
++
++ flags = TCP_SKB_CB(skb)->mptcp_flags;
++ TCP_SKB_CB(skb)->mptcp_flags = flags & ~(MPTCPHDR_FIN);
++ TCP_SKB_CB(buff)->mptcp_flags = flags;
++
++ /* This packet was never sent out yet, so no SACK bits. */
++ TCP_SKB_CB(buff)->sacked = 0;
++
++ buff->ip_summed = CHECKSUM_PARTIAL;
++ skb->ip_summed = CHECKSUM_PARTIAL;
++ skb_split(skb, buff, len);
++
++ /* We lost the dss-option when creating buff - put it back! */
++ if (!is_meta_sk(sk))
++ memcpy(buff->data - dsslen, skb->data - dsslen, dsslen);
++
++ old_factor = tcp_skb_pcount(skb);
++
++ /* Fix up tso_factor for both original and new SKB. */
++ tcp_set_skb_tso_segs(sk, skb, mss_now);
++ tcp_set_skb_tso_segs(sk, buff, mss_now);
++
++ /* If this packet has been sent out already, we must
++ * adjust the various packet counters.
++ */
++ if (!before(tcp_sk(sk)->snd_nxt, TCP_SKB_CB(buff)->end_seq) && reinject != 1) {
++ int diff = old_factor - tcp_skb_pcount(skb) -
++ tcp_skb_pcount(buff);
++
++ if (diff)
++ tcp_adjust_pcount(sk, skb, diff);
++ }
++
++ /* Link BUFF into the send queue. */
++ skb_header_release(buff);
++ if (reinject == 1)
++ __skb_queue_after(&tcp_sk(sk)->mpcb->reinject_queue, skb, buff);
++ else
++ tcp_insert_write_queue_after(skb, buff, sk);
++
++ return 0;
++}
++
++/* Inspired by tcp_write_wakeup */
++int mptcp_write_wakeup(struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sk_buff *skb, *subskb;
++
++ skb = tcp_send_head(meta_sk);
++ if (skb &&
++ before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(meta_tp))) {
++ int err;
++ unsigned int mss;
++ unsigned int seg_size = tcp_wnd_end(meta_tp) - TCP_SKB_CB(skb)->seq;
++ struct sock *subsk = get_available_subflow(meta_sk, skb, &mss);
++ if (!subsk)
++ return -1;
++
++ if (before(meta_tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
++ meta_tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
++
++ /* We are probing the opening of a window
++ * but the window size is != 0
++		 * must have been a result of SWS avoidance (sender)
++ */
++ if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
++ skb->len > mss) {
++ seg_size = min(seg_size, mss);
++ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
++ if (mptcp_fragment(meta_sk, skb, seg_size, mss, 0))
++ return -1;
++ } else if (!tcp_skb_pcount(skb)) {
++ tcp_set_skb_tso_segs(meta_sk, skb, mss);
++ }
++
++ subskb = mptcp_skb_entail(subsk, skb, 0);
++ if (!subskb)
++ return -1;
++
++ TCP_SKB_CB(subskb)->tcp_flags |= TCPHDR_PSH;
++ TCP_SKB_CB(skb)->when = tcp_time_stamp;
++ TCP_SKB_CB(subskb)->when = tcp_time_stamp;
++ err = tcp_transmit_skb(subsk, subskb, 1, GFP_ATOMIC);
++ if (unlikely(err)) {
++ mptcp_transmit_skb_failed(subsk, skb, subskb);
++ return err;
++ }
++
++ mptcp_check_sndseq_wrap(meta_tp, TCP_SKB_CB(skb)->end_seq -
++ TCP_SKB_CB(skb)->seq);
++ tcp_event_new_data_sent(meta_sk, skb);
++ mptcp_sub_event_new_data_sent(subsk, subskb, skb);
++
++ return 0;
++ } else {
++ struct sock *sk_it;
++ int ans = 0;
++
++ if (between(meta_tp->snd_up, meta_tp->snd_una + 1,
++ meta_tp->snd_una + 0xFFFF)) {
++ mptcp_for_each_sk(meta_tp->mpcb, sk_it) {
++ if (mptcp_sk_can_send_ack(sk_it))
++ tcp_xmit_probe_skb(sk_it, 1);
++ }
++ }
++
++ /* At least one of the tcp_xmit_probe_skb's has to succeed */
++ mptcp_for_each_sk(meta_tp->mpcb, sk_it) {
++ int ret;
++
++ if (!mptcp_sk_can_send_ack(sk_it))
++ continue;
++
++ ret = tcp_xmit_probe_skb(sk_it, 0);
++ if (unlikely(ret > 0))
++ ans = ret;
++ }
++ return ans;
++ }
++}
++
++static void mptcp_find_and_set_pathmask(struct sock *meta_sk, struct sk_buff *skb)
++{
++ struct sk_buff *skb_it;
++
++ skb_it = tcp_write_queue_head(meta_sk);
++
++ tcp_for_write_queue_from(skb_it, meta_sk) {
++ if (skb_it == tcp_send_head(meta_sk))
++ break;
++
++ if (TCP_SKB_CB(skb_it)->seq == TCP_SKB_CB(skb)->seq) {
++ TCP_SKB_CB(skb)->path_mask = TCP_SKB_CB(skb_it)->path_mask;
++ break;
++ }
++ }
++}
++
++static struct sk_buff *mptcp_rcv_buf_optimization(struct sock *sk, int penal)
++{
++ struct sock *meta_sk;
++ struct tcp_sock *tp = tcp_sk(sk), *tp_it;
++ struct sk_buff *skb_head;
++
++ if (tp->mpcb->cnt_subflows == 1)
++ return NULL;
++
++ meta_sk = mptcp_meta_sk(sk);
++ skb_head = tcp_write_queue_head(meta_sk);
++
++ if (!skb_head || skb_head == tcp_send_head(meta_sk))
++ return NULL;
++
++	/* If penalization is optional (coming from mptcp_next_segment()) and
++	 * we are not send-buffer-limited, we do not penalize. The retransmission
++ * is just an optimization to fix the idle-time due to the delay before
++ * we wake up the application.
++ */
++ if (!penal && sk_stream_memory_free(meta_sk))
++ goto retrans;
++
++ /* Only penalize again after an RTT has elapsed */
++ if (tcp_time_stamp - tp->mptcp->last_rbuf_opti < tp->srtt >> 3)
++ goto retrans;
++
++ /* Half the cwnd of the slow flow */
++ mptcp_for_each_tp(tp->mpcb, tp_it) {
++ if (tp_it != tp &&
++ TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) {
++ if (tp->srtt < tp_it->srtt && inet_csk((struct sock *)tp_it)->icsk_ca_state == TCP_CA_Open) {
++ tp_it->snd_cwnd = max(tp_it->snd_cwnd >> 1U, 1U);
++ if (tp_it->snd_ssthresh != TCP_INFINITE_SSTHRESH)
++ tp_it->snd_ssthresh = max(tp_it->snd_ssthresh >> 1U, 2U);
++
++ tp->mptcp->last_rbuf_opti = tcp_time_stamp;
++ }
++ break;
++ }
++ }
++
++retrans:
++
++ /* Segment not yet injected into this path? Take it!!! */
++ if (!(TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp->mptcp->path_index))) {
++ bool do_retrans = false;
++ mptcp_for_each_tp(tp->mpcb, tp_it) {
++ if (tp_it != tp &&
++ TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) {
++ if (tp_it->snd_cwnd <= 4) {
++ do_retrans = true;
++ break;
++ }
++
++ if (4 * tp->srtt >= tp_it->srtt) {
++ do_retrans = false;
++ break;
++ } else {
++ do_retrans = true;
++ }
++ }
++ }
++
++ if (do_retrans)
++ return skb_head;
++ }
++ return NULL;
++}
++
++int mptcp_write_xmit(struct sock *meta_sk, unsigned int mss_now, int nonagle,
++ int push_one, gfp_t gfp)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk), *subtp;
++ struct sock *subsk;
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sk_buff *skb;
++ unsigned int tso_segs, old_factor, sent_pkts;
++ int cwnd_quota;
++ int result;
++ int reinject = 0;
++
++ sent_pkts = 0;
++
++ /* Currently mtu-probing is not done in MPTCP */
++ if (!push_one && 0) {
++ /* Do MTU probing. */
++ result = tcp_mtu_probe(meta_sk);
++ if (!result)
++ return 0;
++ else if (result > 0)
++ sent_pkts = 1;
++ }
++
++ while ((skb = mptcp_next_segment(meta_sk, &reinject))) {
++ unsigned int limit;
++ struct sk_buff *subskb = NULL;
++ u32 noneligible = mpcb->noneligible;
++
++ if (reinject == 1) {
++ if (!after(TCP_SKB_CB(skb)->end_seq, meta_tp->snd_una)) {
++ /* Segment already reached the peer, take the next one */
++ __skb_unlink(skb, &mpcb->reinject_queue);
++ __kfree_skb(skb);
++ continue;
++ }
++
++ /* Reinjection and it is coming from a subflow? We need
++ * to find out the path-mask from the meta-write-queue
++ * to properly select a subflow.
++ */
++ if (!TCP_SKB_CB(skb)->path_mask)
++ mptcp_find_and_set_pathmask(meta_sk, skb);
++ }
++
++subflow:
++ subsk = get_available_subflow(meta_sk, skb, &mss_now);
++ if (!subsk)
++ break;
++ subtp = tcp_sk(subsk);
++
++ /* Since all subsocks are locked before calling the scheduler,
++ * the tcp_send_head should not change.
++ */
++ BUG_ON(!reinject && tcp_send_head(meta_sk) != skb);
++retry:
++ /* If the segment was cloned (e.g. a meta retransmission),
++ * the header must be expanded/copied so that there is no
++ * corruption of TSO information.
++ */
++ if (skb_unclone(skb, GFP_ATOMIC))
++ break;
++
++ old_factor = tcp_skb_pcount(skb);
++ tcp_set_skb_tso_segs(meta_sk, skb, mss_now);
++ tso_segs = tcp_skb_pcount(skb);
++
++ if (reinject == -1) {
++ /* The packet has already once been sent, so if we
++ * change the pcount here we have to adjust packets_out
++ * in the meta-sk
++ */
++ int diff = old_factor - tso_segs;
++
++ if (diff)
++ tcp_adjust_pcount(meta_sk, skb, diff);
++ }
++
++ cwnd_quota = tcp_cwnd_test(subtp, skb);
++ if (!cwnd_quota) {
++ /* May happen due to two cases:
++ *
++ * - if at the first selection we circumvented
++ * the test due to a DATA_FIN (and got rejected at
++ * tcp_snd_wnd_test), but the reinjected segment is not
++ * a DATA_FIN.
++ * - if we take a DATA_FIN with data, but
++ * tcp_set_skb_tso_segs() increases the number of
++ * tso_segs to something > 1. Then, cwnd_test might
++ * reject it.
++ */
++ mpcb->noneligible |= mptcp_pi_to_flag(subtp->mptcp->path_index);
++ continue;
++ }
++
++ if (!reinject && unlikely(!tcp_snd_wnd_test(meta_tp, skb, mss_now))) {
++ skb = mptcp_rcv_buf_optimization(subsk, 1);
++ if (skb) {
++ reinject = -1;
++ goto retry;
++ }
++ break;
++ }
++
++ if (tso_segs == 1) {
++ if (unlikely(!tcp_nagle_test(meta_tp, skb, mss_now,
++ (tcp_skb_is_last(meta_sk, skb) ?
++ nonagle : TCP_NAGLE_PUSH))))
++ break;
++ } else {
++ /* Do not try to defer the transmission of a reinjected
++ * segment. Send it directly.
++ * If it is not possible to send the TSO segment on the
++ * best subflow right now try to look for another subflow.
++ * If there is no subflow available defer the segment to avoid
++ * the call to mptso_fragment.
++ */
++ if (!push_one && !reinject && tcp_tso_should_defer(subsk, skb)) {
++ mpcb->noneligible |= mptcp_pi_to_flag(subtp->mptcp->path_index);
++ goto subflow;
++ }
++ }
++
++ limit = mss_now;
++ if (tso_segs > 1 && !tcp_urg_mode(meta_tp))
++ limit = tcp_mss_split_point(subsk, skb, mss_now,
++ min_t(unsigned int,
++ cwnd_quota,
++ subsk->sk_gso_max_segs),
++ nonagle);
++
++ if (skb->len > limit &&
++ unlikely(mptso_fragment(meta_sk, skb, limit, mss_now, gfp, reinject)))
++ break;
++
++ subskb = mptcp_skb_entail(subsk, skb, reinject);
++	} else if (backupsk) {
++ break;
++
++ mpcb->noneligible = noneligible;
++ TCP_SKB_CB(skb)->when = tcp_time_stamp;
++ TCP_SKB_CB(subskb)->when = tcp_time_stamp;
++ if (unlikely(tcp_transmit_skb(subsk, subskb, 1, gfp))) {
++ mptcp_transmit_skb_failed(subsk, skb, subskb);
++ mpcb->noneligible |= mptcp_pi_to_flag(subtp->mptcp->path_index);
++ continue;
++ }
++
++ if (!reinject) {
++ mptcp_check_sndseq_wrap(meta_tp,
++ TCP_SKB_CB(skb)->end_seq -
++ TCP_SKB_CB(skb)->seq);
++ tcp_event_new_data_sent(meta_sk, skb);
++ }
++
++ tcp_minshall_update(meta_tp, mss_now, skb);
++ sent_pkts += tcp_skb_pcount(skb);
++ tcp_sk(subsk)->mptcp->sent_pkts += tcp_skb_pcount(skb);
++
++ mptcp_sub_event_new_data_sent(subsk, subskb, skb);
++
++ if (reinject > 0) {
++ __skb_unlink(skb, &mpcb->reinject_queue);
++ kfree_skb(skb);
++ }
++
++ if (push_one)
++ break;
++ }
++
++ mpcb->noneligible = 0;
++
++ if (likely(sent_pkts)) {
++ mptcp_for_each_sk(mpcb, subsk) {
++ subtp = tcp_sk(subsk);
++ if (subtp->mptcp->sent_pkts) {
++ if (tcp_in_cwnd_reduction(subsk))
++ subtp->prr_out += subtp->mptcp->sent_pkts;
++ tcp_cwnd_validate(subsk);
++ subtp->mptcp->sent_pkts = 0;
++ }
++ }
++ return 0;
++ }
++
++ return !meta_tp->packets_out && tcp_send_head(meta_sk);
++}
++
++void mptcp_write_space(struct sock *sk)
++{
++ mptcp_push_pending_frames(mptcp_meta_sk(sk));
++}
++
++u32 __mptcp_select_window(struct sock *sk)
++{
++ struct inet_connection_sock *icsk = inet_csk(sk);
++ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = mptcp_meta_tp(tp);
++ int mss, free_space, full_space, window;
++
++ /* MSS for the peer's data. Previous versions used mss_clamp
++ * here. I don't know if the value based on our guesses
++ * of peer's MSS is better for the performance. It's more correct
++ * but may be worse for the performance because of rcv_mss
++ * fluctuations. --SAW 1998/11/1
++ */
++ mss = icsk->icsk_ack.rcv_mss;
++ free_space = tcp_space(sk);
++ full_space = min_t(int, meta_tp->window_clamp,
++ tcp_full_space(sk));
++
++ if (mss > full_space)
++ mss = full_space;
++
++ if (free_space < (full_space >> 1)) {
++ icsk->icsk_ack.quick = 0;
++
++ if (tcp_memory_pressure)
++ /* TODO this has to be adapted when we support different
++ * MSS's among the subflows.
++ */
++ meta_tp->rcv_ssthresh = min(meta_tp->rcv_ssthresh,
++ 4U * meta_tp->advmss);
++
++ if (free_space < mss)
++ return 0;
++ }
++
++ if (free_space > meta_tp->rcv_ssthresh)
++ free_space = meta_tp->rcv_ssthresh;
++
++ /* Don't do rounding if we are using window scaling, since the
++ * scaled window will not line up with the MSS boundary anyway.
++ */
++ window = meta_tp->rcv_wnd;
++ if (tp->rx_opt.rcv_wscale) {
++ window = free_space;
++
++ /* Advertise enough space so that it won't get scaled away.
++		 * Important case: prevent zero window announcement if
++ * 1<<rcv_wscale > mss.
++ */
++ if (((window >> tp->rx_opt.rcv_wscale) << tp->
++ rx_opt.rcv_wscale) != window)
++ window = (((window >> tp->rx_opt.rcv_wscale) + 1)
++ << tp->rx_opt.rcv_wscale);
++ } else {
++ /* Get the largest window that is a nice multiple of mss.
++ * Window clamp already applied above.
++ * If our current window offering is within 1 mss of the
++ * free space we just keep it. This prevents the divide
++ * and multiply from happening most of the time.
++ * We also don't do any window rounding when the free space
++ * is too small.
++ */
++ if (window <= free_space - mss || window > free_space)
++ window = (free_space / mss) * mss;
++ else if (mss == full_space &&
++ free_space > window + (full_space >> 1))
++ window = free_space;
++ }
++
++ return window;
++}
++
++void mptcp_syn_options(struct sock *sk, struct tcp_out_options *opts,
++ unsigned *remaining)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++
++ opts->options |= OPTION_MPTCP;
++ if (is_master_tp(tp)) {
++ opts->mptcp_options |= OPTION_MP_CAPABLE | OPTION_TYPE_SYN;
++ *remaining -= MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN;
++ opts->mp_capable.sender_key = tp->mptcp_loc_key;
++ opts->dss_csum = !!sysctl_mptcp_checksum;
++ } else {
++ struct mptcp_cb *mpcb = tp->mpcb;
++
++ opts->mptcp_options |= OPTION_MP_JOIN | OPTION_TYPE_SYN;
++ *remaining -= MPTCP_SUB_LEN_JOIN_SYN_ALIGN;
++ opts->mp_join_syns.token = mpcb->mptcp_rem_token;
++ opts->addr_id = tp->mptcp->loc_id;
++ opts->mp_join_syns.sender_nonce = tp->mptcp->mptcp_loc_nonce;
++ }
++}
++
++void mptcp_synack_options(struct request_sock *req,
++ struct tcp_out_options *opts, unsigned *remaining)
++{
++ struct mptcp_request_sock *mtreq;
++ mtreq = mptcp_rsk(req);
++
++ opts->options |= OPTION_MPTCP;
++ /* MPCB not yet set - thus it's a new MPTCP-session */
++ if (!mtreq->mpcb) {
++ opts->mptcp_options |= OPTION_MP_CAPABLE | OPTION_TYPE_SYNACK;
++ opts->mp_capable.sender_key = mtreq->mptcp_loc_key;
++ opts->dss_csum = !!sysctl_mptcp_checksum || mtreq->dss_csum;
++ *remaining -= MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN;
++ } else {
++ opts->mptcp_options |= OPTION_MP_JOIN | OPTION_TYPE_SYNACK;
++ opts->mp_join_syns.sender_truncated_mac =
++ mtreq->mptcp_hash_tmac;
++ opts->mp_join_syns.sender_nonce = mtreq->mptcp_loc_nonce;
++ opts->addr_id = mtreq->loc_id;
++ *remaining -= MPTCP_SUB_LEN_JOIN_SYNACK_ALIGN;
++ }
++}
++
++void mptcp_established_options(struct sock *sk, struct sk_buff *skb,
++ struct tcp_out_options *opts, unsigned *size)
++{
++ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = mptcp_meta_tp(tp);
++ struct mptcp_cb *mpcb = tp->mpcb;
++ struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
++
++ /* In fallback mp_fail-mode, we have to repeat it until the fallback
++ * has been done by the sender
++ */
++ if (unlikely(tp->mptcp->send_mp_fail)) {
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_MP_FAIL;
++ opts->data_ack = (__u32)(mpcb->csum_cutoff_seq >> 32);
++ opts->data_seq = (__u32)mpcb->csum_cutoff_seq;
++ *size += MPTCP_SUB_LEN_FAIL;
++ return;
++ }
++
++ if (unlikely(tp->send_mp_fclose)) {
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_MP_FCLOSE;
++ opts->mp_capable.receiver_key = mpcb->mptcp_rem_key;
++ *size += MPTCP_SUB_LEN_FCLOSE_ALIGN;
++ return;
++ }
++
++ /* 1. If we are the sender of the infinite-mapping, we need the
++ * MPTCPHDR_INF-flag, because a retransmission of the
++	 * infinite-announcement still needs the mptcp-option.
++ *
++ * We need infinite_cutoff_seq, because retransmissions from before
++ * the infinite-cutoff-moment still need the MPTCP-signalling to stay
++ * consistent.
++ *
++ * 2. If we are the receiver of the infinite-mapping, we always skip
++ * mptcp-options, because acknowledgments from before the
++ * infinite-mapping point have already been sent out.
++ *
++ * I know, the whole infinite-mapping stuff is ugly...
++ *
++ * TODO: Handle wrapped data-sequence numbers
++ * (even if it's very unlikely)
++ */
++ if (unlikely(mpcb->infinite_mapping_snd) &&
++ tp->mptcp->fully_established &&
++ ((mpcb->send_infinite_mapping && tcb &&
++ !(tcb->mptcp_flags & MPTCPHDR_INF) &&
++ !before(tcb->seq, tp->mptcp->infinite_cutoff_seq)) ||
++ !mpcb->send_infinite_mapping))
++ return;
++
++ if (unlikely(tp->mptcp->include_mpc)) {
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_MP_CAPABLE |
++ OPTION_TYPE_ACK;
++ *size += MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN;
++ opts->mp_capable.sender_key = mpcb->mptcp_loc_key;
++ opts->mp_capable.receiver_key = mpcb->mptcp_rem_key;
++ opts->dss_csum = mpcb->dss_csum;
++
++ if (skb)
++ tp->mptcp->include_mpc = 0;
++ }
++ if (unlikely(tp->mptcp->pre_established)) {
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_MP_JOIN | OPTION_TYPE_ACK;
++ *size += MPTCP_SUB_LEN_JOIN_ACK_ALIGN;
++ }
++
++ if (!tp->mptcp->include_mpc && !tp->mptcp->pre_established) {
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_DATA_ACK;
++ /* If !skb, we come from tcp_current_mss and thus we always
++ * assume that the DSS-option will be set for the data-packet.
++ */
++ if (skb && !mptcp_is_data_seq(skb)) {
++ opts->data_ack = meta_tp->rcv_nxt;
++
++ *size += MPTCP_SUB_LEN_ACK_ALIGN;
++ } else {
++ opts->data_ack = meta_tp->rcv_nxt;
++
++			/* Doesn't matter whether the csum is included or not. It will be
++ * either 10 or 12, and thus aligned = 12
++ */
++ *size += MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN;
++ }
++
++ *size += MPTCP_SUB_LEN_DSS_ALIGN;
++ }
++
++ if (mpcb->pm_ops->addr_signal)
++ mpcb->pm_ops->addr_signal(sk, size, opts, skb);
++
++ if (unlikely(tp->mptcp->send_mp_prio) &&
++ MAX_TCP_OPTION_SPACE - *size >= MPTCP_SUB_LEN_PRIO_ALIGN) {
++ opts->options |= OPTION_MPTCP;
++ opts->mptcp_options |= OPTION_MP_PRIO;
++ if (skb)
++ tp->mptcp->send_mp_prio = 0;
++ *size += MPTCP_SUB_LEN_PRIO_ALIGN;
++ }
++
++ return;
++}
++
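++/* Keep the meta-level window bookkeeping in sync: the window selected on
++ * this subflow becomes the meta-level advertised window, and rcv_wup is
++ * advanced to rcv_nxt as in tcp_select_window().
++ */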
++u16 mptcp_select_window(struct sock *sk)
++{
++ u16 new_win = tcp_select_window(sk);
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct tcp_sock *meta_tp = mptcp_meta_tp(tp);
++
++ meta_tp->rcv_wnd = tp->rcv_wnd;
++ meta_tp->rcv_wup = meta_tp->rcv_nxt;
++
++ return new_win;
++}
++
++void mptcp_options_write(__be32 *ptr, struct tcp_sock *tp,
++ struct tcp_out_options *opts,
++ struct sk_buff *skb)
++{
++ if (unlikely(OPTION_MP_CAPABLE & opts->mptcp_options)) {
++ struct mp_capable *mpc = (struct mp_capable *)ptr;
++
++ mpc->kind = TCPOPT_MPTCP;
++
++ if ((OPTION_TYPE_SYN & opts->mptcp_options) ||
++ (OPTION_TYPE_SYNACK & opts->mptcp_options)) {
++ mpc->sender_key = opts->mp_capable.sender_key;
++ mpc->len = MPTCP_SUB_LEN_CAPABLE_SYN;
++ ptr += MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN >> 2;
++ } else if (OPTION_TYPE_ACK & opts->mptcp_options) {
++ mpc->sender_key = opts->mp_capable.sender_key;
++ mpc->receiver_key = opts->mp_capable.receiver_key;
++ mpc->len = MPTCP_SUB_LEN_CAPABLE_ACK;
++ ptr += MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN >> 2;
++ }
++
++ mpc->sub = MPTCP_SUB_CAPABLE;
++ mpc->ver = 0;
++ mpc->a = opts->dss_csum;
++ mpc->b = 0;
++ mpc->rsv = 0;
++ mpc->h = 1;
++ }
++
++ if (unlikely(OPTION_MP_JOIN & opts->mptcp_options)) {
++ struct mp_join *mpj = (struct mp_join *)ptr;
++
++ mpj->kind = TCPOPT_MPTCP;
++ mpj->sub = MPTCP_SUB_JOIN;
++ mpj->rsv = 0;
++ mpj->addr_id = opts->addr_id;
++
++ if (OPTION_TYPE_SYN & opts->mptcp_options) {
++ mpj->len = MPTCP_SUB_LEN_JOIN_SYN;
++ mpj->u.syn.token = opts->mp_join_syns.token;
++ mpj->u.syn.nonce = opts->mp_join_syns.sender_nonce;
++ mpj->b = tp->mptcp->low_prio;
++ ptr += MPTCP_SUB_LEN_JOIN_SYN_ALIGN >> 2;
++ } else if (OPTION_TYPE_SYNACK & opts->mptcp_options) {
++ mpj->len = MPTCP_SUB_LEN_JOIN_SYNACK;
++ mpj->u.synack.mac =
++ opts->mp_join_syns.sender_truncated_mac;
++ mpj->u.synack.nonce = opts->mp_join_syns.sender_nonce;
++ mpj->b = tp->mptcp->low_prio;
++ ptr += MPTCP_SUB_LEN_JOIN_SYNACK_ALIGN >> 2;
++ } else if (OPTION_TYPE_ACK & opts->mptcp_options) {
++ mpj->len = MPTCP_SUB_LEN_JOIN_ACK;
++ memcpy(mpj->u.ack.mac, &tp->mptcp->sender_mac[0], 20);
++ ptr += MPTCP_SUB_LEN_JOIN_ACK_ALIGN >> 2;
++ }
++ }
++ if (unlikely(OPTION_ADD_ADDR & opts->mptcp_options)) {
++ struct mp_add_addr *mpadd = (struct mp_add_addr *)ptr;
++
++ mpadd->kind = TCPOPT_MPTCP;
++ if (opts->add_addr_v4) {
++ mpadd->len = MPTCP_SUB_LEN_ADD_ADDR4;
++ mpadd->sub = MPTCP_SUB_ADD_ADDR;
++ mpadd->ipver = 4;
++ mpadd->addr_id = opts->add_addr4.addr_id;
++ mpadd->u.v4.addr = opts->add_addr4.addr;
++ ptr += MPTCP_SUB_LEN_ADD_ADDR4_ALIGN >> 2;
++ } else if (opts->add_addr_v6) {
++ mpadd->len = MPTCP_SUB_LEN_ADD_ADDR6;
++ mpadd->sub = MPTCP_SUB_ADD_ADDR;
++ mpadd->ipver = 6;
++ mpadd->addr_id = opts->add_addr6.addr_id;
++ memcpy(&mpadd->u.v6.addr, &opts->add_addr6.addr,
++ sizeof(mpadd->u.v6.addr));
++ ptr += MPTCP_SUB_LEN_ADD_ADDR6_ALIGN >> 2;
++ }
++ }
++ if (unlikely(OPTION_REMOVE_ADDR & opts->mptcp_options)) {
++ struct mp_remove_addr *mprem = (struct mp_remove_addr *)ptr;
++ u8 *addrs_id;
++ int id, len, len_align;
++
++ len = mptcp_sub_len_remove_addr(opts->remove_addrs);
++ len_align = mptcp_sub_len_remove_addr_align(opts->remove_addrs);
++
++ mprem->kind = TCPOPT_MPTCP;
++ mprem->len = len;
++ mprem->sub = MPTCP_SUB_REMOVE_ADDR;
++ mprem->rsv = 0;
++ addrs_id = &mprem->addrs_id;
++
++ mptcp_for_each_bit_set(opts->remove_addrs, id)
++ *(addrs_id++) = id;
++
++ /* Fill the rest with NOP's */
++ if (len_align > len) {
++ int i;
++ for (i = 0; i < len_align - len; i++)
++ *(addrs_id++) = TCPOPT_NOP;
++ }
++
++ ptr += len_align >> 2;
++ }
++ if (unlikely(OPTION_MP_FAIL & opts->mptcp_options)) {
++ struct mp_fail *mpfail = (struct mp_fail *)ptr;
++
++ mpfail->kind = TCPOPT_MPTCP;
++ mpfail->len = MPTCP_SUB_LEN_FAIL;
++ mpfail->sub = MPTCP_SUB_FAIL;
++ mpfail->rsv1 = 0;
++ mpfail->rsv2 = 0;
++ mpfail->data_seq = htonll(((u64)opts->data_ack << 32) | opts->data_seq);
++
++ ptr += MPTCP_SUB_LEN_FAIL_ALIGN >> 2;
++ }
++ if (unlikely(OPTION_MP_FCLOSE & opts->mptcp_options)) {
++ struct mp_fclose *mpfclose = (struct mp_fclose *)ptr;
++
++ mpfclose->kind = TCPOPT_MPTCP;
++ mpfclose->len = MPTCP_SUB_LEN_FCLOSE;
++ mpfclose->sub = MPTCP_SUB_FCLOSE;
++ mpfclose->rsv1 = 0;
++ mpfclose->rsv2 = 0;
++ mpfclose->key = opts->mp_capable.receiver_key;
++
++ ptr += MPTCP_SUB_LEN_FCLOSE_ALIGN >> 2;
++ }
++
++ if (OPTION_DATA_ACK & opts->mptcp_options) {
++ if (!mptcp_is_data_seq(skb)) {
++ struct mp_dss *mdss = (struct mp_dss *)ptr;
++
++ mdss->kind = TCPOPT_MPTCP;
++ mdss->sub = MPTCP_SUB_DSS;
++ mdss->rsv1 = 0;
++ mdss->rsv2 = 0;
++ mdss->F = 0;
++ mdss->m = 0;
++ mdss->M = 0;
++ mdss->a = 0;
++ mdss->A = 1;
++ mdss->len = mptcp_sub_len_dss(mdss, tp->mpcb->dss_csum);
++
++ ptr++;
++ *ptr++ = htonl(opts->data_ack);
++ } else {
++ /**** Just update the data_ack ****/
++
++ /* Get pointer to data_ack-field. MPTCP is always at
++ * the end of the TCP-options.
++ */
++ /* TODO if we allow sending 64-bit dseq's we have to change "16" */
++ __be32 *dack = (__be32 *)(skb->data + (tcp_hdr(skb)->doff << 2) - 16);
++
++ *dack = htonl(opts->data_ack);
++ }
++ }
++ if (unlikely(OPTION_MP_PRIO & opts->mptcp_options)) {
++ struct mp_prio *mpprio = (struct mp_prio *)ptr;
++
++ mpprio->kind = TCPOPT_MPTCP;
++ mpprio->len = MPTCP_SUB_LEN_PRIO;
++ mpprio->sub = MPTCP_SUB_PRIO;
++ mpprio->rsv = 0;
++ mpprio->b = tp->mptcp->low_prio;
++ mpprio->addr_id = TCPOPT_NOP;
++
++ ptr += MPTCP_SUB_LEN_PRIO_ALIGN >> 2;
++ }
++}
++
++/* Returns the next segment to be sent from the mptcp meta-queue.
++ * (chooses the reinject queue if any segment is waiting in it, otherwise,
++ * chooses the normal write queue).
++ * Sets *@reinject to 1 if the returned segment comes from the
++ * reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk,
++ * and sets it to -1 if it is a meta-level retransmission to optimize the
++ * receive-buffer.
++ */
++struct sk_buff *mptcp_next_segment(struct sock *meta_sk, int *reinject)
++{
++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
++ struct sk_buff *skb = NULL;
++ if (reinject)
++ *reinject = 0;
++
++ /* If we are in fallback-mode, just take from the meta-send-queue */
++ if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping)
++ return tcp_send_head(meta_sk);
++
++ skb = skb_peek(&mpcb->reinject_queue);
++
++ if (skb) {
++ if (reinject)
++ *reinject = 1;
++ } else {
++ skb = tcp_send_head(meta_sk);
++
++ if (!skb && meta_sk->sk_socket &&
++ test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) &&
++ sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) {
++ struct sock *subsk = get_available_subflow(meta_sk, NULL, NULL);
++ if (!subsk)
++ return NULL;
++
++ skb = mptcp_rcv_buf_optimization(subsk, 0);
++ if (skb && reinject)
++ *reinject = -1;
++ }
++ }
++ return skb;
++}
++
++/* Sends the datafin */
++void mptcp_send_fin(struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sk_buff *skb = tcp_write_queue_tail(meta_sk);
++ int mss_now;
++
++ if ((1 << meta_sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
++ meta_tp->mpcb->passive_close = 1;
++
++ /* Optimization, tack on the FIN if we have a queue of
++ * unsent frames. But be careful about outgoing SACKS
++ * and IP options.
++ */
++ mss_now = mptcp_current_mss(meta_sk);
++
++ if (tcp_send_head(meta_sk) != NULL) {
++ TCP_SKB_CB(skb)->mptcp_flags |= MPTCPHDR_FIN;
++ TCP_SKB_CB(skb)->end_seq++;
++ meta_tp->write_seq++;
++ } else {
++ /* Socket is locked, keep trying until memory is available. */
++ for (;;) {
++ skb = alloc_skb_fclone(MAX_TCP_HEADER,
++ meta_sk->sk_allocation);
++ if (skb)
++ break;
++ yield();
++ }
++ /* Reserve space for headers and prepare control bits. */
++ skb_reserve(skb, MAX_TCP_HEADER);
++
++ tcp_init_nondata_skb(skb, meta_tp->write_seq, TCPHDR_ACK);
++ TCP_SKB_CB(skb)->end_seq++;
++ TCP_SKB_CB(skb)->mptcp_flags |= MPTCPHDR_FIN | MPTCPHDR_SEQ;
++ tcp_queue_skb(meta_sk, skb);
++ }
++ __tcp_push_pending_frames(meta_sk, mss_now, TCP_NAGLE_OFF);
++}
++
++void mptcp_send_active_reset(struct sock *meta_sk, gfp_t priority)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct sock *sk = NULL, *sk_it = NULL, *tmpsk;
++
++ if (!mpcb->cnt_subflows)
++ return;
++
++ WARN_ON(meta_tp->send_mp_fclose);
++
++ /* First - select a socket */
++ sk = mptcp_select_ack_sock(meta_sk, 0);
++
++ /* May happen if no subflow is in an appropriate state */
++ if (!sk)
++ return;
++
++ /* We are in infinite mode - just send a reset */
++ if (mpcb->infinite_mapping_snd || mpcb->infinite_mapping_rcv) {
++ sk->sk_err = ECONNRESET;
++ if (tcp_need_reset(sk->sk_state))
++ tcp_send_active_reset(sk, priority);
++ mptcp_sub_force_close(sk);
++ return;
++ }
++
++
++ tcp_sk(sk)->send_mp_fclose = 1;
++ /** Reset all other subflows */
++
++ /* tcp_done must be handled with bh disabled */
++ if (!in_serving_softirq())
++ local_bh_disable();
++
++ mptcp_for_each_sk_safe(mpcb, sk_it, tmpsk) {
++ if (tcp_sk(sk_it)->send_mp_fclose)
++ continue;
++
++ sk_it->sk_err = ECONNRESET;
++ if (tcp_need_reset(sk_it->sk_state))
++ tcp_send_active_reset(sk_it, GFP_ATOMIC);
++ mptcp_sub_force_close(sk_it);
++ }
++
++ if (!in_serving_softirq())
++ local_bh_enable();
++
++ tcp_send_ack(sk);
++ inet_csk_reset_keepalive_timer(sk, inet_csk(sk)->icsk_rto);
++
++ meta_tp->send_mp_fclose = 1;
++}
++
++static void mptcp_ack_retransmit_timer(struct sock *sk)
++{
++ struct sk_buff *skb;
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct inet_connection_sock *icsk = inet_csk(sk);
++
++ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
++ goto out; /* Routing failure or similar */
++
++ if (!tp->retrans_stamp)
++ tp->retrans_stamp = tcp_time_stamp ? : 1;
++
++ if (tcp_write_timeout(sk)) {
++ tp->mptcp->pre_established = 0;
++ sk_stop_timer(sk, &tp->mptcp->mptcp_ack_timer);
++ tcp_send_active_reset(sk, GFP_ATOMIC);
++ goto out;
++ }
++
++ skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
++ if (skb == NULL) {
++ sk_reset_timer(sk, &tp->mptcp->mptcp_ack_timer,
++ jiffies + icsk->icsk_rto);
++ return;
++ }
++
++ /* Reserve space for headers and prepare control bits */
++ skb_reserve(skb, MAX_TCP_HEADER);
++ tcp_init_nondata_skb(skb, tp->snd_una, TCPHDR_ACK);
++
++ TCP_SKB_CB(skb)->when = tcp_time_stamp;
++ if (tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC) > 0) {
++ /* Retransmission failed because of local congestion,
++ * do not backoff.
++ */
++ if (!icsk->icsk_retransmits)
++ icsk->icsk_retransmits = 1;
++ sk_reset_timer(sk, &tp->mptcp->mptcp_ack_timer,
++ jiffies + icsk->icsk_rto);
++ return;
++ }
++
++
++ icsk->icsk_retransmits++;
++ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
++ sk_reset_timer(sk, &tp->mptcp->mptcp_ack_timer,
++ jiffies + icsk->icsk_rto);
++ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0)) {
++ __sk_dst_reset(sk);
++ }
++
++out:;
++}
++
++void mptcp_ack_handler(unsigned long data)
++{
++ struct sock *sk = (struct sock *)data;
++ struct sock *meta_sk = mptcp_meta_sk(sk);
++
++ bh_lock_sock(meta_sk);
++ if (sock_owned_by_user(meta_sk)) {
++ /* Try again later */
++ sk_reset_timer(sk, &tcp_sk(sk)->mptcp->mptcp_ack_timer,
++ jiffies + (HZ / 20));
++ goto out_unlock;
++ }
++
++ if (sk->sk_state == TCP_CLOSE)
++ goto out_unlock;
++
++ mptcp_ack_retransmit_timer(sk);
++
++ sk_mem_reclaim(sk);
++
++out_unlock:
++ bh_unlock_sock(meta_sk);
++ sock_put(sk);
++}
++
++/* Similar to tcp_retransmit_skb
++ *
++ * The diff is that we handle the retransmission-stats (retrans_stamp) at the
++ * meta-level.
++ */
++int mptcp_retransmit_skb(struct sock *meta_sk, struct sk_buff *skb)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct sock *subsk;
++ struct sk_buff *subskb;
++ unsigned int limit, tso_segs, mss_now;
++ int err = -1, oldpcount;
++
++	/* Do not send more than we queued. 1/4 is reserved for possible
++ * copying overhead: fragmentation, tunneling, mangling etc.
++ *
++ * This is a meta-retransmission thus we check on the meta-socket.
++ */
++ if (atomic_read(&meta_sk->sk_wmem_alloc) >
++ min(meta_sk->sk_wmem_queued + (meta_sk->sk_wmem_queued >> 2), meta_sk->sk_sndbuf)) {
++ return -EAGAIN;
++ }
++
++ /* We need to make sure that the retransmitted segment can be sent on a
++ * subflow right now. If it is too big, it needs to be fragmented.
++ */
++ subsk = get_available_subflow(meta_sk, skb, &mss_now);
++ if (!subsk) {
++ /* We want to increase icsk_retransmits, thus return 0, so that
++ * mptcp_retransmit_timer enters the desired branch.
++ */
++ err = 0;
++ goto failed;
++ }
++
++ /* If the segment was cloned (e.g. a meta retransmission), the header
++ * must be expanded/copied so that there is no corruption of TSO
++ * information.
++ */
++ if (skb_unclone(skb, GFP_ATOMIC)) {
++ err = ENOMEM;
++ goto failed;
++ }
++
++ oldpcount = tcp_skb_pcount(skb);
++ tcp_set_skb_tso_segs(meta_sk, skb, mss_now);
++ tso_segs = tcp_skb_pcount(skb);
++ BUG_ON(!tso_segs);
++
++ /* The MSS might have changed and so the number of segments. We
++ * need to account for this change.
++ */
++ if (unlikely(oldpcount != tso_segs))
++ tcp_adjust_pcount(meta_sk, skb, oldpcount - tso_segs);
++
++ limit = mss_now;
++ if (tso_segs > 1 && !tcp_urg_mode(meta_tp))
++ limit = tcp_mss_split_point(subsk, skb, mss_now,
++ min_t(unsigned int,
++ tcp_cwnd_test(tcp_sk(subsk), skb),
++ subsk->sk_gso_max_segs),
++ TCP_NAGLE_OFF);
++
++ if (skb->len > limit &&
++ unlikely(mptso_fragment(meta_sk, skb, limit, mss_now,
++ GFP_ATOMIC, 0)))
++ goto failed;
++
++ subskb = mptcp_skb_entail(subsk, skb, -1);
++ if (!subskb)
++ goto failed;
++
++ TCP_SKB_CB(skb)->when = tcp_time_stamp;
++ TCP_SKB_CB(subskb)->when = tcp_time_stamp;
++ err = tcp_transmit_skb(subsk, subskb, 1, GFP_ATOMIC);
++ if (!err) {
++ /* Update global TCP statistics. */
++ TCP_INC_STATS(sock_net(meta_sk), TCP_MIB_RETRANSSEGS);
++
++ /* Diff to tcp_retransmit_skb */
++
++ /* Save stamp of the first retransmit. */
++ if (!meta_tp->retrans_stamp)
++ meta_tp->retrans_stamp = TCP_SKB_CB(subskb)->when;
++ mptcp_sub_event_new_data_sent(subsk, subskb, skb);
++ } else {
++ mptcp_transmit_skb_failed(subsk, skb, subskb);
++ }
++
++failed:
++ return err;
++}
++
++/* Similar to tcp_retransmit_timer
++ *
++ * The diff is that we have to handle retransmissions of the FAST_CLOSE-message
++ * and that we don't have an srtt estimation at the meta-level.
++ */
++void mptcp_retransmit_timer(struct sock *meta_sk)
++{
++ struct tcp_sock *meta_tp = tcp_sk(meta_sk);
++ struct mptcp_cb *mpcb = meta_tp->mpcb;
++ struct inet_connection_sock *meta_icsk = inet_csk(meta_sk);
++ int err;
++
++ /* In fallback, retransmission is handled at the subflow-level */
++ if (!meta_tp->packets_out || mpcb->infinite_mapping_snd ||
++ mpcb->send_infinite_mapping)
++ return;
++
++ WARN_ON(tcp_write_queue_empty(meta_sk));
++
++ if (!meta_tp->snd_wnd && !sock_flag(meta_sk, SOCK_DEAD) &&
++ !((1 << meta_sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
++ /* Receiver dastardly shrinks window. Our retransmits
++ * become zero probes, but we should not timeout this
++ * connection. If the socket is an orphan, time it out,
++ * we cannot allow such beasts to hang infinitely.
++ */
++ struct inet_sock *meta_inet = inet_sk(meta_sk);
++ if (meta_sk->sk_family == AF_INET) {
++ LIMIT_NETDEBUG(KERN_DEBUG "MPTCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
++ &meta_inet->inet_daddr,
++ ntohs(meta_inet->inet_dport),
++ meta_inet->inet_num, meta_tp->snd_una,
++ meta_tp->snd_nxt);
++ }
++#if IS_ENABLED(CONFIG_IPV6)
++ else if (meta_sk->sk_family == AF_INET6) {
++ LIMIT_NETDEBUG(KERN_DEBUG "MPTCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
++ &meta_sk->sk_v6_daddr,
++ ntohs(meta_inet->inet_dport),
++ meta_inet->inet_num, meta_tp->snd_una,
++ meta_tp->snd_nxt);
++ }
++#endif
++ if (tcp_time_stamp - meta_tp->rcv_tstamp > TCP_RTO_MAX) {
++ tcp_write_err(meta_sk);
++ return;
++ }
++
++ mptcp_retransmit_skb(meta_sk, tcp_write_queue_head(meta_sk));
++ goto out_reset_timer;
++ }
++
++ if (tcp_write_timeout(meta_sk))
++ return;
++
++ if (meta_icsk->icsk_retransmits == 0)
++ NET_INC_STATS_BH(sock_net(meta_sk), LINUX_MIB_TCPTIMEOUTS);
++
++ meta_icsk->icsk_ca_state = TCP_CA_Loss;
++
++ err = mptcp_retransmit_skb(meta_sk, tcp_write_queue_head(meta_sk));
++ if (err > 0) {
++ /* Retransmission failed because of local congestion,
++ * do not backoff.
++ */
++ if (!meta_icsk->icsk_retransmits)
++ meta_icsk->icsk_retransmits = 1;
++ inet_csk_reset_xmit_timer(meta_sk, ICSK_TIME_RETRANS,
++ min(meta_icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
++ TCP_RTO_MAX);
++ return;
++ }
++
++ /* Increase the timeout each time we retransmit. Note that
++ * we do not increase the rtt estimate. rto is initialized
++ * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
++ * that doubling rto each time is the least we can get away with.
++ * In KA9Q, Karn uses this for the first few times, and then
++ * goes to quadratic. netBSD doubles, but only goes up to *64,
++ * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
++ * defined in the protocol as the maximum possible RTT. I guess
++ * we'll have to use something other than TCP to talk to the
++ * University of Mars.
++ *
++ * PAWS allows us longer timeouts and large windows, so once
++ * implemented ftp to mars will work nicely. We will have to fix
++ * the 120 second clamps though!
++ */
++ meta_icsk->icsk_backoff++;
++ meta_icsk->icsk_retransmits++;
++
++out_reset_timer:
++ /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
++ * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
++ * might be increased if the stream oscillates between thin and thick,
++ * thus the old value might already be too high compared to the value
++ * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
++ * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
++	 * exponential backoff behaviour to avoid continuing to hammer
++ * linear-timeout retransmissions into a black hole
++ */
++ if (meta_sk->sk_state == TCP_ESTABLISHED &&
++ (meta_tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
++ tcp_stream_is_thin(meta_tp) &&
++ meta_icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
++ meta_icsk->icsk_backoff = 0;
++ /* We cannot do the same as in tcp_write_timer because the
++ * srtt is not set here.
++ */
++ mptcp_set_rto(meta_sk);
++ } else {
++ /* Use normal (exponential) backoff */
++ meta_icsk->icsk_rto = min(meta_icsk->icsk_rto << 1, TCP_RTO_MAX);
++ }
++ inet_csk_reset_xmit_timer(meta_sk, ICSK_TIME_RETRANS, meta_icsk->icsk_rto, TCP_RTO_MAX);
++
++ return;
++}
++
++/* Modify values to an mptcp-level for the initial window of new subflows */
++void mptcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
++ __u32 *window_clamp, int wscale_ok,
++ __u8 *rcv_wscale, __u32 init_rcv_wnd,
++ const struct sock *sk)
++{
++ struct mptcp_cb *mpcb = tcp_sk(sk)->mpcb;
++
++ *window_clamp = mpcb->orig_window_clamp;
++ __space = tcp_win_from_space(mpcb->orig_sk_rcvbuf);
++
++ tcp_select_initial_window(__space, mss, rcv_wnd, window_clamp,
++ wscale_ok, rcv_wscale, init_rcv_wnd, sk);
++}
++
++unsigned int mptcp_current_mss(struct sock *meta_sk)
++{
++ unsigned int mss = 0;
++ struct sock *sk;
++
++ mptcp_for_each_sk(tcp_sk(meta_sk)->mpcb, sk) {
++ int this_mss;
++
++ if (!mptcp_sk_can_send(sk))
++ continue;
++
++ this_mss = tcp_current_mss(sk);
++ if (this_mss > mss)
++ mss = this_mss;
++ }
++
++ /* If no subflow is available, we take a default-mss from the
++ * meta-socket.
++ */
++ return !mss ? tcp_current_mss(meta_sk) : mss;
++}
++
++int mptcp_select_size(const struct sock *meta_sk, bool sg)
++{
++ int mss = 0; /* We look for the smallest MSS */
++ struct sock *sk;
++
++ mptcp_for_each_sk(tcp_sk(meta_sk)->mpcb, sk) {
++ int this_mss;
++
++ if (!mptcp_sk_can_send(sk))
++ continue;
++
++ this_mss = tcp_sk(sk)->mss_cache;
++ if (this_mss > mss)
++ mss = this_mss;
++ }
++
++ if (sg) {
++ if (mptcp_sk_can_gso(meta_sk)) {
++ mss = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
++ } else {
++ int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
++
++ if (mss >= pgbreak &&
++ mss <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
++ mss = pgbreak;
++ }
++ }
++
++ return !mss ? tcp_sk(meta_sk)->mss_cache : mss;
++}
++
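++/* Estimate how many segments the send buffer must be able to hold: scale
++ * this subflow's cwnd by the worst-case RTT across all subflows
++ * (fixed-point with a 16-bit shift), never going below reordering + 1.
++ */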
++int mptcp_check_snd_buf(const struct tcp_sock *tp)
++{
++ struct sock *sk;
++ u32 rtt_max = tp->srtt;
++ u64 bw_est;
++
++ if (!tp->srtt)
++ return tp->reordering + 1;
++
++ mptcp_for_each_sk(tp->mpcb, sk) {
++ if (!mptcp_sk_can_send(sk))
++ continue;
++
++ if (rtt_max < tcp_sk(sk)->srtt)
++ rtt_max = tcp_sk(sk)->srtt;
++ }
++
++ bw_est = div64_u64(((u64)tp->snd_cwnd * rtt_max) << 16,
++ (u64)tp->srtt);
++
++ return max_t(unsigned int, (u32)(bw_est >> 16),
++ tp->reordering + 1);
++
++}
++
++unsigned int mptcp_xmit_size_goal(struct sock *meta_sk, u32 mss_now,
++ int large_allowed)
++{
++ struct sock *sk;
++ u32 xmit_size_goal = 0;
++
++ if (large_allowed && mptcp_sk_can_gso(meta_sk)) {
++ mptcp_for_each_sk(tcp_sk(meta_sk)->mpcb, sk) {
++ int this_size_goal;
++
++ if (!mptcp_sk_can_send(sk))
++ continue;
++
++ this_size_goal = tcp_xmit_size_goal(sk, mss_now, 1);
++ if (this_size_goal > xmit_size_goal)
++ xmit_size_goal = this_size_goal;
++ }
++ }
++
++ return max(xmit_size_goal, mss_now);
++}
++
++/* Similar to tcp_trim_head - but we correctly copy the DSS-option */
++int mptcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
++{
++ int dsslen = MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN +
++ MPTCP_SUB_LEN_SEQ_ALIGN;
++ char dss[dsslen];
++
++ /* DSS-option must be recovered afterwards. */
++ memcpy(dss, skb->data - dsslen, dsslen);
++
++ if (skb_cloned(skb)) {
++ /* pskb_expand_head will delete our DSS-option. We have to copy
++ * it back if pskb_expand_head succeeds.
++ */
++
++ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
++ return -ENOMEM;
++
++ memcpy(skb->data - dsslen, dss, dsslen);
++ }
++
++ __pskb_trim_head(skb, len);
++
++ /* Put the DSS-option back in our header */
++ memcpy(skb->data - dsslen, dss, dsslen);
++
++ TCP_SKB_CB(skb)->seq += len;
++ skb->ip_summed = CHECKSUM_PARTIAL;
++
++ skb->truesize -= len;
++ sk->sk_wmem_queued -= len;
++ sk_mem_uncharge(sk, len);
++ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
++
++ /* Any change of skb->len requires recalculation of tso factor. */
++ if (tcp_skb_pcount(skb) > 1)
++ tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
++
++ return 0;
++}
+diff --git a/net/mptcp/mptcp_pm.c b/net/mptcp/mptcp_pm.c
+new file mode 100644
+index 0000000..1f78ae1
+--- /dev/null
++++ b/net/mptcp/mptcp_pm.c
+@@ -0,0 +1,170 @@
++/*
++ * MPTCP implementation - MPTCP-subflow-management
++ *
++ * Initial Design & Implementation:
++ * Sébastien Barré <sebastien.barre@uclouvain.be>
++ *
++ * Current Maintainer & Author:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * Additional authors:
++ * Jaakko Korkeaniemi <jaakko.korkeaniemi@aalto.fi>
++ * Gregory Detal <gregory.detal@uclouvain.be>
++ * Fabien Duchêne <fabien.duchene@uclouvain.be>
++ * Andreas Seelinger <Andreas.Seelinger@rwth-aachen.de>
++ * Lavkesh Lahngir <lavkesh51@gmail.com>
++ * Andreas Ripke <ripke@neclab.eu>
++ * Vlad Dogaru <vlad.dogaru@intel.com>
++ * Octavian Purdila <octavian.purdila@intel.com>
++ * John Ronan <jronan@tssg.org>
++ * Catalin Nicutar <catalin.nicutar@gmail.com>
++ * Brandon Heller <brandonh@stanford.edu>
++ *
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++
++#include <linux/module.h>
++#include <net/mptcp.h>
++
++static DEFINE_SPINLOCK(mptcp_pm_list_lock);
++static LIST_HEAD(mptcp_pm_list);
++
++static int mptcp_default_index(sa_family_t family, union inet_addr *addr,
++ struct net *net)
++{
++ return 0;
++}
++
++struct mptcp_pm_ops mptcp_pm_default = {
++ .get_local_index = mptcp_default_index,
++ .get_local_id = mptcp_default_index, /* We do not care */
++ .name = "default",
++ .owner = THIS_MODULE,
++};
++
++static struct mptcp_pm_ops *mptcp_pm_find(const char *name)
++{
++ struct mptcp_pm_ops *e;
++
++ list_for_each_entry_rcu(e, &mptcp_pm_list, list) {
++ if (strcmp(e->name, name) == 0)
++ return e;
++ }
++
++ return NULL;
++}
++
++int mptcp_register_path_manager(struct mptcp_pm_ops *pm)
++{
++ int ret = 0;
++
++ if (!pm->get_local_index || !pm->get_local_id)
++ return -EINVAL;
++
++ spin_lock(&mptcp_pm_list_lock);
++ if (mptcp_pm_find(pm->name)) {
++ pr_notice("%s already registered\n", pm->name);
++ ret = -EEXIST;
++ } else {
++ list_add_tail_rcu(&pm->list, &mptcp_pm_list);
++ pr_info("%s registered\n", pm->name);
++ }
++ spin_unlock(&mptcp_pm_list_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(mptcp_register_path_manager);
++
++void mptcp_unregister_path_manager(struct mptcp_pm_ops *pm)
++{
++ spin_lock(&mptcp_pm_list_lock);
++ list_del_rcu(&pm->list);
++ spin_unlock(&mptcp_pm_list_lock);
++}
++EXPORT_SYMBOL_GPL(mptcp_unregister_path_manager);
++
++void mptcp_get_default_path_manager(char *name)
++{
++ struct mptcp_pm_ops *pm;
++
++ BUG_ON(list_empty(&mptcp_pm_list));
++
++ rcu_read_lock();
++ pm = list_entry(mptcp_pm_list.next, struct mptcp_pm_ops, list);
++ strncpy(name, pm->name, MPTCP_PM_NAME_MAX);
++ rcu_read_unlock();
++}
++
++int mptcp_set_default_path_manager(const char *name)
++{
++ struct mptcp_pm_ops *pm;
++ int ret = -ENOENT;
++
++ spin_lock(&mptcp_pm_list_lock);
++ pm = mptcp_pm_find(name);
++#ifdef CONFIG_MODULES
++ if (!pm && capable(CAP_NET_ADMIN)) {
++ spin_unlock(&mptcp_pm_list_lock);
++
++ request_module("mptcp_%s", name);
++ spin_lock(&mptcp_pm_list_lock);
++ pm = mptcp_pm_find(name);
++ }
++#endif
++
++ if (pm) {
++ list_move(&pm->list, &mptcp_pm_list);
++ ret = 0;
++ } else {
++ pr_info("%s is not available\n", name);
++ }
++ spin_unlock(&mptcp_pm_list_lock);
++
++ return ret;
++}
++
++void mptcp_init_path_manager(struct mptcp_cb *mpcb)
++{
++ struct mptcp_pm_ops *pm;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(pm, &mptcp_pm_list, list) {
++ if (try_module_get(pm->owner)) {
++ mpcb->pm_ops = pm;
++ break;
++ }
++ }
++ rcu_read_unlock();
++}
++
++/* Manage refcounts on socket close. */
++void mptcp_cleanup_path_manager(struct mptcp_cb *mpcb)
++{
++ module_put(mpcb->pm_ops->owner);
++}
++
++/* Fallback to the default path-manager. */
++void mptcp_fallback_default(struct mptcp_cb *mpcb)
++{
++ struct mptcp_pm_ops *pm;
++
++ mptcp_cleanup_path_manager(mpcb);
++ pm = mptcp_pm_find("default");
++
++ /* Cannot fail - it's the default module */
++ try_module_get(pm->owner);
++ mpcb->pm_ops = pm;
++}
++EXPORT_SYMBOL_GPL(mptcp_fallback_default);
++
++/* Set default value from kernel configuration at bootup */
++static int __init mptcp_path_manager_default(void)
++{
++ return mptcp_set_default_path_manager(CONFIG_DEFAULT_MPTCP_PM);
++}
++late_initcall(mptcp_path_manager_default);
+diff --git a/net/mptcp/mptcp_wvegas.c b/net/mptcp/mptcp_wvegas.c
+new file mode 100644
+index 0000000..8e1fd50
+--- /dev/null
++++ b/net/mptcp/mptcp_wvegas.c
+@@ -0,0 +1,270 @@
++/*
++ * MPTCP implementation - WEIGHTED VEGAS
++ *
++ * Algorithm design:
++ * Yu Cao <cyAnalyst@126.com>
++ * Mingwei Xu <xmw@csnet1.cs.tsinghua.edu.cn>
++ * Xiaoming Fu <fu@cs.uni-goettinggen.de>
++ *
++ * Implementation:
++ * Yu Cao <cyAnalyst@126.com>
++ * Enhuan Dong <deh13@mails.tsinghua.edu.cn>
++ *
++ * Ported to the official MPTCP-kernel:
++ * Christoph Paasch <christoph.paasch@uclouvain.be>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++#include <linux/skbuff.h>
++#include <net/tcp.h>
++#include <net/mptcp.h>
++#include <linux/module.h>
++#include <linux/tcp.h>
++
++static int initial_alpha = 2;
++static int total_alpha = 10;
++static int gamma = 1;
++
++module_param(initial_alpha, int, 0644);
++MODULE_PARM_DESC(initial_alpha, "initial alpha for all subflows");
++module_param(total_alpha, int, 0644);
++MODULE_PARM_DESC(total_alpha, "total alpha for all subflows");
++module_param(gamma, int, 0644);
++MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
++
++#define MPTCP_WVEGAS_SCALE 16
++
++/* wVegas variables */
++struct wvegas {
++ u32 beg_snd_nxt; /* right edge during last RTT */
++ u8 doing_wvegas_now;/* if true, do wvegas for this RTT */
++
++ u16 cnt_rtt; /* # of RTTs measured within last RTT */
++ u32 sampled_rtt; /* cumulative RTTs measured within last RTT (in usec) */
++ u32 base_rtt; /* the min of all wVegas RTT measurements seen (in usec) */
++
++ u64 instant_rate; /* cwnd / srtt_us, unit: pkts/us * 2^16 */
++ u64 weight; /* the ratio of subflow's rate to the total rate, * 2^16 */
++ int alpha; /* alpha for each subflows */
++
++ u32 queue_delay; /* queue delay*/
++};
++
++
++static inline u64 mptcp_wvegas_scale(u32 val, int scale)
++{
++ return (u64) val << scale;
++}
++
++static void wvegas_enable(struct sock *sk)
++{
++ const struct tcp_sock *tp = tcp_sk(sk);
++ struct wvegas *wvegas = inet_csk_ca(sk);
++
++ wvegas->doing_wvegas_now = 1;
++
++ wvegas->beg_snd_nxt = tp->snd_nxt;
++
++ wvegas->cnt_rtt = 0;
++ wvegas->sampled_rtt = 0;
++
++ wvegas->instant_rate = 0;
++ wvegas->alpha = initial_alpha;
++ wvegas->weight = mptcp_wvegas_scale(1, MPTCP_WVEGAS_SCALE);
++
++ wvegas->queue_delay = 0;
++}
++
++static inline void wvegas_disable(struct sock *sk)
++{
++ struct wvegas *wvegas = inet_csk_ca(sk);
++
++ wvegas->doing_wvegas_now = 0;
++}
++
++static void mptcp_wvegas_init(struct sock *sk)
++{
++ struct wvegas *wvegas = inet_csk_ca(sk);
++
++ wvegas->base_rtt = 0x7fffffff;
++ wvegas_enable(sk);
++}
++
++static inline u64 mptcp_wvegas_rate(u32 cwnd, u32 rtt_us)
++{
++ return div_u64(mptcp_wvegas_scale(cwnd, MPTCP_WVEGAS_SCALE), rtt_us);
++}
++
++static void mptcp_wvegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
++{
++ struct wvegas *wvegas = inet_csk_ca(sk);
++ u32 vrtt;
++
++ if (rtt_us < 0)
++ return;
++
++ vrtt = rtt_us + 1;
++
++ if (vrtt < wvegas->base_rtt)
++ wvegas->base_rtt = vrtt;
++
++ wvegas->sampled_rtt += vrtt;
++ wvegas->cnt_rtt++;
++}
++
++static void mptcp_wvegas_state(struct sock *sk, u8 ca_state)
++{
++ if (ca_state == TCP_CA_Open)
++ wvegas_enable(sk);
++ else
++ wvegas_disable(sk);
++}
++
++static void mptcp_wvegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
++{
++ if (event == CA_EVENT_CWND_RESTART) {
++ mptcp_wvegas_init(sk);
++ } else if (event == CA_EVENT_LOSS) {
++ struct wvegas *wvegas = inet_csk_ca(sk);
++ wvegas->instant_rate = 0;
++ }
++}
++
++static inline u32 mptcp_wvegas_ssthresh(struct tcp_sock *tp)
++{
++ return min(tp->snd_ssthresh, tp->snd_cwnd - 1);
++}
++
++static u64 mptcp_wvegas_weight(struct mptcp_cb *mpcb, struct sock *sk)
++{
++ u64 total_rate = 0;
++ struct sock *sub_sk;
++ struct wvegas *wvegas = inet_csk_ca(sk);
++
++ if (!mpcb)
++ return wvegas->weight;
++
++
++ mptcp_for_each_sk(mpcb, sub_sk) {
++ struct wvegas *sub_wvegas = inet_csk_ca(sub_sk);
++
++		/* sampled_rtt is initialized to 0 */
++ if (mptcp_sk_can_send(sub_sk) && (sub_wvegas->sampled_rtt > 0))
++ total_rate += sub_wvegas->instant_rate;
++ }
++
++ if (total_rate && wvegas->instant_rate)
++ return div64_u64(mptcp_wvegas_scale(wvegas->instant_rate, MPTCP_WVEGAS_SCALE), total_rate);
++ else
++ return wvegas->weight;
++}
++
++static void mptcp_wvegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
++{
++ struct tcp_sock *tp = tcp_sk(sk);
++ struct wvegas *wvegas = inet_csk_ca(sk);
++
++ if (!wvegas->doing_wvegas_now) {
++ tcp_reno_cong_avoid(sk, ack, acked, in_flight);
++ return;
++ }
++
++ if (after(ack, wvegas->beg_snd_nxt)) {
++ wvegas->beg_snd_nxt = tp->snd_nxt;
++
++ if (wvegas->cnt_rtt <= 2) {
++ tcp_reno_cong_avoid(sk, ack, acked, in_flight);
++ } else {
++ u32 rtt, diff, q_delay;
++ u64 target_cwnd;
++
++ rtt = wvegas->sampled_rtt / wvegas->cnt_rtt;
++ target_cwnd = div_u64(((u64)tp->snd_cwnd * wvegas->base_rtt), rtt);
++
++ diff = div_u64((u64)tp->snd_cwnd * (rtt - wvegas->base_rtt), rtt);
++
++ if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
++ tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
++ tp->snd_ssthresh = mptcp_wvegas_ssthresh(tp);
++
++ } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
++ tcp_slow_start(tp, acked);
++ } else {
++ if (diff >= wvegas->alpha) {
++ wvegas->instant_rate = mptcp_wvegas_rate(tp->snd_cwnd, rtt);
++ wvegas->weight = mptcp_wvegas_weight(tp->mpcb, sk);
++ wvegas->alpha = max(2U, (u32)((wvegas->weight * total_alpha) >> MPTCP_WVEGAS_SCALE));
++ }
++ if (diff > wvegas->alpha) {
++ tp->snd_cwnd--;
++ tp->snd_ssthresh = mptcp_wvegas_ssthresh(tp);
++ } else if (diff < wvegas->alpha) {
++ tp->snd_cwnd++;
++ }
++
++				/* Try to drain the link queue if needed */
++ q_delay = rtt - wvegas->base_rtt;
++ if ((wvegas->queue_delay == 0) || (wvegas->queue_delay > q_delay))
++ wvegas->queue_delay = q_delay;
++
++ if (q_delay >= 2 * wvegas->queue_delay) {
++ u32 backoff_factor = div_u64(mptcp_wvegas_scale(wvegas->base_rtt, MPTCP_WVEGAS_SCALE), 2 * rtt);
++ tp->snd_cwnd = ((u64)tp->snd_cwnd * backoff_factor) >> MPTCP_WVEGAS_SCALE;
++ wvegas->queue_delay = 0;
++ }
++ }
++
++ if (tp->snd_cwnd < 2)
++ tp->snd_cwnd = 2;
++ else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
++ tp->snd_cwnd = tp->snd_cwnd_clamp;
++
++ tp->snd_ssthresh = tcp_current_ssthresh(sk);
++ }
++
++ wvegas->cnt_rtt = 0;
++ wvegas->sampled_rtt = 0;
++ }
++ /* Use normal slow start */
++ else if (tp->snd_cwnd <= tp->snd_ssthresh)
++ tcp_slow_start(tp, acked);
++}
++
++
++static struct tcp_congestion_ops mptcp_wvegas __read_mostly = {
++ .flags = TCP_CONG_RTT_STAMP,
++ .init = mptcp_wvegas_init,
++ .ssthresh = tcp_reno_ssthresh,
++ .cong_avoid = mptcp_wvegas_cong_avoid,
++ .min_cwnd = tcp_reno_min_cwnd,
++ .pkts_acked = mptcp_wvegas_pkts_acked,
++ .set_state = mptcp_wvegas_state,
++ .cwnd_event = mptcp_wvegas_cwnd_event,
++
++ .owner = THIS_MODULE,
++ .name = "wvegas",
++};
++
++static int __init mptcp_wvegas_register(void)
++{
++ BUILD_BUG_ON(sizeof(struct wvegas) > ICSK_CA_PRIV_SIZE);
++ tcp_register_congestion_control(&mptcp_wvegas);
++ return 0;
++}
++
++static void __exit mptcp_wvegas_unregister(void)
++{
++ tcp_unregister_congestion_control(&mptcp_wvegas);
++}
++
++module_init(mptcp_wvegas_register);
++module_exit(mptcp_wvegas_unregister);
++
++MODULE_AUTHOR("Yu Cao, Enhuan Dong");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("MPTCP wVegas");
++MODULE_VERSION("0.1");
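
The mptcp_pm.c file added above defines the path-manager plugin interface: a module fills in a struct mptcp_pm_ops (mptcp_register_path_manager() requires at least .get_local_index, .get_local_id, .name and .owner) and registers it, after which it can be picked up by mptcp_init_path_manager() or selected via mptcp_set_default_path_manager(). As a rough illustration only, not part of this patch set and with a hypothetical module name, a minimal out-of-tree path manager built against that API could look like:

/* Hypothetical "noop" path manager: an illustrative sketch against the
 * mptcp_pm_ops API added by mptcp.patch above. Like the built-in "default"
 * manager it always reports local-address index/id 0 and defines no extra
 * callbacks, so it never creates additional subflows on its own. */
#include <linux/module.h>
#include <net/mptcp.h>

static int noop_get_local_index(sa_family_t family, union inet_addr *addr,
				struct net *net)
{
	return 0;	/* always use the first local address slot */
}

static struct mptcp_pm_ops noop_pm = {
	.get_local_index = noop_get_local_index,
	.get_local_id	 = noop_get_local_index,
	.name		 = "noop",
	.owner		 = THIS_MODULE,
};

static int __init noop_pm_register(void)
{
	/* Returns -EEXIST if a manager of the same name is already registered. */
	return mptcp_register_path_manager(&noop_pm);
}

static void __exit noop_pm_unregister(void)
{
	mptcp_unregister_path_manager(&noop_pm);
}

module_init(noop_pm_register);
module_exit(noop_pm_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical no-op MPTCP path manager (example only)");

Once loaded, such a module would be activated through mptcp_set_default_path_manager("noop"), the same entry point the CONFIG_DEFAULT_MPTCP_PM late_initcall above goes through at boot.
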
diff --git a/target/linux/patches/3.14.43/mtd-rootfs.patch b/target/linux/patches/3.14.43/mtd-rootfs.patch
new file mode 100644
index 000000000..775d5fc80
--- /dev/null
+++ b/target/linux/patches/3.14.43/mtd-rootfs.patch
@@ -0,0 +1,26 @@
+diff -Nur linux-3.5.orig//drivers/mtd/mtdpart.c linux-3.5/drivers/mtd/mtdpart.c
+--- linux-3.5.orig//drivers/mtd/mtdpart.c 2012-07-21 22:58:29.000000000 +0200
++++ linux-3.5/drivers/mtd/mtdpart.c 2012-07-31 23:59:07.000000000 +0200
+@@ -30,6 +30,7 @@
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+ #include <linux/err.h>
++#include <linux/root_dev.h>
+
+ #include "mtdcore.h"
+
+@@ -637,6 +638,14 @@
+ if (IS_ERR(slave))
+ return PTR_ERR(slave);
+
++ if (strcmp(parts[i].name, "rootfs") == 0) {
++ if (ROOT_DEV == 0) {
++ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
++ "set to be root filesystem\n");
++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, i);
++ }
++ }
++
+ mutex_lock(&mtd_partitions_mutex);
+ list_add(&slave->list, &mtd_partitions);
+ mutex_unlock(&mtd_partitions_mutex);
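
For context (this is not part of the patch itself): the new branch in mtdpart.c only fires when the flash partition table actually contains a partition named "rootfs". With the command-line MTD partition parser, that name would typically come from a boot argument along the lines of

	mtdparts=<flash-id>:256k(u-boot),2m(kernel),-(rootfs)

where <flash-id> and the partition sizes are hypothetical, board-specific placeholders. When the third partition in that example is registered, the patched loop presets ROOT_DEV to the matching mtdblock device, so the intent is that an explicit root= argument becomes unnecessary for such layouts.
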
diff --git a/target/linux/patches/3.14.43/nfsv3-tcp.patch b/target/linux/patches/3.14.43/nfsv3-tcp.patch
new file mode 100644
index 000000000..68ed95b08
--- /dev/null
+++ b/target/linux/patches/3.14.43/nfsv3-tcp.patch
@@ -0,0 +1,12 @@
+diff -Nur linux-3.13.11.orig/fs/nfs/nfsroot.c linux-3.13.11/fs/nfs/nfsroot.c
+--- linux-3.13.11.orig/fs/nfs/nfsroot.c 2014-04-23 01:49:33.000000000 +0200
++++ linux-3.13.11/fs/nfs/nfsroot.c 2014-05-13 16:09:25.000000000 +0200
+@@ -87,7 +87,7 @@
+ #define NFS_ROOT "/tftpboot/%s"
+
+ /* Default NFSROOT mount options. */
+-#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096"
++#define NFS_DEF_OPTIONS "nfsvers=3,proto=tcp,rsize=4096,wsize=4096"
+
+ /* Parameters passed from the kernel command line */
+ static char nfs_root_parms[256] __initdata = "";
diff --git a/target/linux/patches/3.14.43/non-static.patch b/target/linux/patches/3.14.43/non-static.patch
new file mode 100644
index 000000000..a967703d0
--- /dev/null
+++ b/target/linux/patches/3.14.43/non-static.patch
@@ -0,0 +1,33 @@
+diff -Nur linux-2.6.39-rc6.orig/fs/namei.c linux-2.6.39-rc6/fs/namei.c
+--- linux-2.6.39-rc6.orig/fs/namei.c 2011-05-04 04:59:13.000000000 +0200
++++ linux-2.6.39-rc6/fs/namei.c 2011-05-05 11:30:14.000000000 +0200
+@@ -1769,7 +1769,7 @@
+ * needs parent already locked. Doesn't follow mounts.
+ * SMP-safe.
+ */
+-static struct dentry *lookup_hash(struct nameidata *nd)
++struct dentry *lookup_hash(struct nameidata *nd)
+ {
+ return __lookup_hash(&nd->last, nd->path.dentry, nd);
+ }
+diff -Nur linux-2.6.39-rc6.orig/fs/splice.c linux-2.6.39-rc6/fs/splice.c
+--- linux-2.6.39-rc6.orig/fs/splice.c 2011-05-04 04:59:13.000000000 +0200
++++ linux-2.6.39-rc6/fs/splice.c 2011-05-05 11:31:04.000000000 +0200
+@@ -1081,7 +1081,7 @@
+ /*
+ * Attempt to initiate a splice from pipe to file.
+ */
+-static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
++long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags)
+ {
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
+@@ -1109,7 +1109,7 @@
+ /*
+ * Attempt to initiate a splice from a file to a pipe.
+ */
+-static long do_splice_to(struct file *in, loff_t *ppos,
++long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+ {
diff --git a/target/linux/patches/3.14.43/ppc64-missing-zlib.patch b/target/linux/patches/3.14.43/ppc64-missing-zlib.patch
new file mode 100644
index 000000000..c6e0616be
--- /dev/null
+++ b/target/linux/patches/3.14.43/ppc64-missing-zlib.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-3.11.5.orig/arch/powerpc/platforms/pseries/Kconfig linux-3.11.5/arch/powerpc/platforms/pseries/Kconfig
+--- linux-3.11.5.orig/arch/powerpc/platforms/pseries/Kconfig 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/arch/powerpc/platforms/pseries/Kconfig 2013-11-01 15:23:09.000000000 +0100
+@@ -17,6 +17,7 @@
+ select PPC_NATIVE
+ select PPC_PCI_CHOICE if EXPERT
+ select ZLIB_DEFLATE
++ select ZLIB_INFLATE
+ select PPC_DOORBELL
+ select HAVE_CONTEXT_TRACKING
+ select HOTPLUG_CPU if SMP
diff --git a/target/linux/patches/3.14.43/regmap-boolean.patch b/target/linux/patches/3.14.43/regmap-boolean.patch
new file mode 100644
index 000000000..d73620e4d
--- /dev/null
+++ b/target/linux/patches/3.14.43/regmap-boolean.patch
@@ -0,0 +1,24 @@
+diff -Nur linux-3.14.4.orig/drivers/base/regmap/Kconfig linux-3.14.4/drivers/base/regmap/Kconfig
+--- linux-3.14.4.orig/drivers/base/regmap/Kconfig 2014-05-13 13:33:14.000000000 +0200
++++ linux-3.14.4/drivers/base/regmap/Kconfig 2014-05-21 13:46:48.928985115 +0200
+@@ -10,16 +10,16 @@
+ bool
+
+ config REGMAP_I2C
+- tristate
++ boolean
+
+ config REGMAP_SPI
+- tristate
++ boolean
+
+ config REGMAP_SPMI
+- tristate
++ boolean
+
+ config REGMAP_MMIO
+- tristate
++ boolean
+
+ config REGMAP_IRQ
+ bool
diff --git a/target/linux/patches/3.14.43/relocs.patch b/target/linux/patches/3.14.43/relocs.patch
new file mode 100644
index 000000000..69a7c88a9
--- /dev/null
+++ b/target/linux/patches/3.14.43/relocs.patch
@@ -0,0 +1,2709 @@
+diff -Nur linux-3.13.6.orig/arch/x86/tools/relocs.c linux-3.13.6/arch/x86/tools/relocs.c
+--- linux-3.13.6.orig/arch/x86/tools/relocs.c 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/arch/x86/tools/relocs.c 2014-03-15 19:39:45.000000000 +0100
+@@ -126,6 +126,7 @@
+
+ if (err) {
+ regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
++ printf("foo: %s\n", sym_regex[i]);
+ die("%s", errbuf);
+ }
+ }
+diff -Nur linux-3.13.6.orig/arch/x86/tools/relocs.h linux-3.13.6/arch/x86/tools/relocs.h
+--- linux-3.13.6.orig/arch/x86/tools/relocs.h 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/arch/x86/tools/relocs.h 2014-03-15 18:48:40.000000000 +0100
+@@ -9,11 +9,19 @@
+ #include <string.h>
+ #include <errno.h>
+ #include <unistd.h>
++#ifdef __linux__
+ #include <elf.h>
+ #include <byteswap.h>
+ #define USE_BSD
+ #include <endian.h>
++#else
++#include "elf.h"
++#endif
++#ifdef __APPLE__
++#include <pcreposix.h>
++#else
+ #include <regex.h>
++#endif
+ #include <tools/le_byteshift.h>
+
+ void die(char *fmt, ...);
+diff -Nur linux-3.13.6.orig/tools/include/elf.h linux-3.13.6/tools/include/elf.h
+--- linux-3.13.6.orig/tools/include/elf.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.13.6/tools/include/elf.h 2014-03-15 18:47:36.000000000 +0100
+@@ -0,0 +1,2671 @@
++#ifndef _ELF_H
++#define _ELF_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <stdint.h>
++
++typedef uint16_t Elf32_Half;
++typedef uint16_t Elf64_Half;
++
++typedef uint32_t Elf32_Word;
++typedef int32_t Elf32_Sword;
++typedef uint32_t Elf64_Word;
++typedef int32_t Elf64_Sword;
++
++typedef uint64_t Elf32_Xword;
++typedef int64_t Elf32_Sxword;
++typedef uint64_t Elf64_Xword;
++typedef int64_t Elf64_Sxword;
++
++typedef uint32_t Elf32_Addr;
++typedef uint64_t Elf64_Addr;
++
++typedef uint32_t Elf32_Off;
++typedef uint64_t Elf64_Off;
++
++typedef uint16_t Elf32_Section;
++typedef uint16_t Elf64_Section;
++
++typedef Elf32_Half Elf32_Versym;
++typedef Elf64_Half Elf64_Versym;
++
++#define EI_NIDENT (16)
++
++typedef struct {
++ unsigned char e_ident[EI_NIDENT];
++ Elf32_Half e_type;
++ Elf32_Half e_machine;
++ Elf32_Word e_version;
++ Elf32_Addr e_entry;
++ Elf32_Off e_phoff;
++ Elf32_Off e_shoff;
++ Elf32_Word e_flags;
++ Elf32_Half e_ehsize;
++ Elf32_Half e_phentsize;
++ Elf32_Half e_phnum;
++ Elf32_Half e_shentsize;
++ Elf32_Half e_shnum;
++ Elf32_Half e_shstrndx;
++} Elf32_Ehdr;
++
++typedef struct {
++ unsigned char e_ident[EI_NIDENT];
++ Elf64_Half e_type;
++ Elf64_Half e_machine;
++ Elf64_Word e_version;
++ Elf64_Addr e_entry;
++ Elf64_Off e_phoff;
++ Elf64_Off e_shoff;
++ Elf64_Word e_flags;
++ Elf64_Half e_ehsize;
++ Elf64_Half e_phentsize;
++ Elf64_Half e_phnum;
++ Elf64_Half e_shentsize;
++ Elf64_Half e_shnum;
++ Elf64_Half e_shstrndx;
++} Elf64_Ehdr;
++
++#define EI_MAG0 0
++#define ELFMAG0 0x7f
++
++#define EI_MAG1 1
++#define ELFMAG1 'E'
++
++#define EI_MAG2 2
++#define ELFMAG2 'L'
++
++#define EI_MAG3 3
++#define ELFMAG3 'F'
++
++
++#define ELFMAG "\177ELF"
++#define SELFMAG 4
++
++#define EI_CLASS 4
++#define ELFCLASSNONE 0
++#define ELFCLASS32 1
++#define ELFCLASS64 2
++#define ELFCLASSNUM 3
++
++#define EI_DATA 5
++#define ELFDATANONE 0
++#define ELFDATA2LSB 1
++#define ELFDATA2MSB 2
++#define ELFDATANUM 3
++
++#define EI_VERSION 6
++
++
++#define EI_OSABI 7
++#define ELFOSABI_NONE 0
++#define ELFOSABI_SYSV 0
++#define ELFOSABI_HPUX 1
++#define ELFOSABI_NETBSD 2
++#define ELFOSABI_LINUX 3
++#define ELFOSABI_GNU 3
++#define ELFOSABI_SOLARIS 6
++#define ELFOSABI_AIX 7
++#define ELFOSABI_IRIX 8
++#define ELFOSABI_FREEBSD 9
++#define ELFOSABI_TRU64 10
++#define ELFOSABI_MODESTO 11
++#define ELFOSABI_OPENBSD 12
++#define ELFOSABI_ARM 97
++#define ELFOSABI_STANDALONE 255
++
++#define EI_ABIVERSION 8
++
++#define EI_PAD 9
++
++
++
++#define ET_NONE 0
++#define ET_REL 1
++#define ET_EXEC 2
++#define ET_DYN 3
++#define ET_CORE 4
++#define ET_NUM 5
++#define ET_LOOS 0xfe00
++#define ET_HIOS 0xfeff
++#define ET_LOPROC 0xff00
++#define ET_HIPROC 0xffff
++
++
++
++#define EM_NONE 0
++#define EM_M32 1
++#define EM_SPARC 2
++#define EM_386 3
++#define EM_68K 4
++#define EM_88K 5
++#define EM_860 7
++#define EM_MIPS 8
++#define EM_S370 9
++#define EM_MIPS_RS3_LE 10
++
++#define EM_PARISC 15
++#define EM_VPP500 17
++#define EM_SPARC32PLUS 18
++#define EM_960 19
++#define EM_PPC 20
++#define EM_PPC64 21
++#define EM_S390 22
++
++#define EM_V800 36
++#define EM_FR20 37
++#define EM_RH32 38
++#define EM_RCE 39
++#define EM_ARM 40
++#define EM_FAKE_ALPHA 41
++#define EM_SH 42
++#define EM_SPARCV9 43
++#define EM_TRICORE 44
++#define EM_ARC 45
++#define EM_H8_300 46
++#define EM_H8_300H 47
++#define EM_H8S 48
++#define EM_H8_500 49
++#define EM_IA_64 50
++#define EM_MIPS_X 51
++#define EM_COLDFIRE 52
++#define EM_68HC12 53
++#define EM_MMA 54
++#define EM_PCP 55
++#define EM_NCPU 56
++#define EM_NDR1 57
++#define EM_STARCORE 58
++#define EM_ME16 59
++#define EM_ST100 60
++#define EM_TINYJ 61
++#define EM_X86_64 62
++#define EM_PDSP 63
++
++#define EM_FX66 66
++#define EM_ST9PLUS 67
++#define EM_ST7 68
++#define EM_68HC16 69
++#define EM_68HC11 70
++#define EM_68HC08 71
++#define EM_68HC05 72
++#define EM_SVX 73
++#define EM_ST19 74
++#define EM_VAX 75
++#define EM_CRIS 76
++#define EM_JAVELIN 77
++#define EM_FIREPATH 78
++#define EM_ZSP 79
++#define EM_MMIX 80
++#define EM_HUANY 81
++#define EM_PRISM 82
++#define EM_AVR 83
++#define EM_FR30 84
++#define EM_D10V 85
++#define EM_D30V 86
++#define EM_V850 87
++#define EM_M32R 88
++#define EM_MN10300 89
++#define EM_MN10200 90
++#define EM_PJ 91
++#define EM_OPENRISC 92
++#define EM_ARC_A5 93
++#define EM_XTENSA 94
++#define EM_AARCH64 183
++#define EM_TILEPRO 188
++#define EM_MICROBLAZE 189
++#define EM_TILEGX 191
++#define EM_NUM 192
++#define EM_ALPHA 0x9026
++
++#define EV_NONE 0
++#define EV_CURRENT 1
++#define EV_NUM 2
++
++typedef struct {
++ Elf32_Word sh_name;
++ Elf32_Word sh_type;
++ Elf32_Word sh_flags;
++ Elf32_Addr sh_addr;
++ Elf32_Off sh_offset;
++ Elf32_Word sh_size;
++ Elf32_Word sh_link;
++ Elf32_Word sh_info;
++ Elf32_Word sh_addralign;
++ Elf32_Word sh_entsize;
++} Elf32_Shdr;
++
++typedef struct {
++ Elf64_Word sh_name;
++ Elf64_Word sh_type;
++ Elf64_Xword sh_flags;
++ Elf64_Addr sh_addr;
++ Elf64_Off sh_offset;
++ Elf64_Xword sh_size;
++ Elf64_Word sh_link;
++ Elf64_Word sh_info;
++ Elf64_Xword sh_addralign;
++ Elf64_Xword sh_entsize;
++} Elf64_Shdr;
++
++
++
++#define SHN_UNDEF 0
++#define SHN_LORESERVE 0xff00
++#define SHN_LOPROC 0xff00
++#define SHN_BEFORE 0xff00
++
++#define SHN_AFTER 0xff01
++
++#define SHN_HIPROC 0xff1f
++#define SHN_LOOS 0xff20
++#define SHN_HIOS 0xff3f
++#define SHN_ABS 0xfff1
++#define SHN_COMMON 0xfff2
++#define SHN_XINDEX 0xffff
++#define SHN_HIRESERVE 0xffff
++
++
++
++#define SHT_NULL 0
++#define SHT_PROGBITS 1
++#define SHT_SYMTAB 2
++#define SHT_STRTAB 3
++#define SHT_RELA 4
++#define SHT_HASH 5
++#define SHT_DYNAMIC 6
++#define SHT_NOTE 7
++#define SHT_NOBITS 8
++#define SHT_REL 9
++#define SHT_SHLIB 10
++#define SHT_DYNSYM 11
++#define SHT_INIT_ARRAY 14
++#define SHT_FINI_ARRAY 15
++#define SHT_PREINIT_ARRAY 16
++#define SHT_GROUP 17
++#define SHT_SYMTAB_SHNDX 18
++#define SHT_NUM 19
++#define SHT_LOOS 0x60000000
++#define SHT_GNU_ATTRIBUTES 0x6ffffff5
++#define SHT_GNU_HASH 0x6ffffff6
++#define SHT_GNU_LIBLIST 0x6ffffff7
++#define SHT_CHECKSUM 0x6ffffff8
++#define SHT_LOSUNW 0x6ffffffa
++#define SHT_SUNW_move 0x6ffffffa
++#define SHT_SUNW_COMDAT 0x6ffffffb
++#define SHT_SUNW_syminfo 0x6ffffffc
++#define SHT_GNU_verdef 0x6ffffffd
++#define SHT_GNU_verneed 0x6ffffffe
++#define SHT_GNU_versym 0x6fffffff
++#define SHT_HISUNW 0x6fffffff
++#define SHT_HIOS 0x6fffffff
++#define SHT_LOPROC 0x70000000
++#define SHT_HIPROC 0x7fffffff
++#define SHT_LOUSER 0x80000000
++#define SHT_HIUSER 0x8fffffff
++
++#define SHF_WRITE (1 << 0)
++#define SHF_ALLOC (1 << 1)
++#define SHF_EXECINSTR (1 << 2)
++#define SHF_MERGE (1 << 4)
++#define SHF_STRINGS (1 << 5)
++#define SHF_INFO_LINK (1 << 6)
++#define SHF_LINK_ORDER (1 << 7)
++#define SHF_OS_NONCONFORMING (1 << 8)
++
++#define SHF_GROUP (1 << 9)
++#define SHF_TLS (1 << 10)
++#define SHF_MASKOS 0x0ff00000
++#define SHF_MASKPROC 0xf0000000
++#define SHF_ORDERED (1 << 30)
++#define SHF_EXCLUDE (1 << 31)
++
++#define GRP_COMDAT 0x1
++
++typedef struct {
++ Elf32_Word st_name;
++ Elf32_Addr st_value;
++ Elf32_Word st_size;
++ unsigned char st_info;
++ unsigned char st_other;
++ Elf32_Section st_shndx;
++} Elf32_Sym;
++
++typedef struct {
++ Elf64_Word st_name;
++ unsigned char st_info;
++ unsigned char st_other;
++ Elf64_Section st_shndx;
++ Elf64_Addr st_value;
++ Elf64_Xword st_size;
++} Elf64_Sym;
++
++typedef struct {
++ Elf32_Half si_boundto;
++ Elf32_Half si_flags;
++} Elf32_Syminfo;
++
++typedef struct {
++ Elf64_Half si_boundto;
++ Elf64_Half si_flags;
++} Elf64_Syminfo;
++
++#define SYMINFO_BT_SELF 0xffff
++#define SYMINFO_BT_PARENT 0xfffe
++#define SYMINFO_BT_LOWRESERVE 0xff00
++
++#define SYMINFO_FLG_DIRECT 0x0001
++#define SYMINFO_FLG_PASSTHRU 0x0002
++#define SYMINFO_FLG_COPY 0x0004
++#define SYMINFO_FLG_LAZYLOAD 0x0008
++
++#define SYMINFO_NONE 0
++#define SYMINFO_CURRENT 1
++#define SYMINFO_NUM 2
++
++#define ELF32_ST_BIND(val) (((unsigned char) (val)) >> 4)
++#define ELF32_ST_TYPE(val) ((val) & 0xf)
++#define ELF32_ST_INFO(bind, type) (((bind) << 4) + ((type) & 0xf))
++
++#define ELF64_ST_BIND(val) ELF32_ST_BIND (val)
++#define ELF64_ST_TYPE(val) ELF32_ST_TYPE (val)
++#define ELF64_ST_INFO(bind, type) ELF32_ST_INFO ((bind), (type))
++
++#define STB_LOCAL 0
++#define STB_GLOBAL 1
++#define STB_WEAK 2
++#define STB_NUM 3
++#define STB_LOOS 10
++#define STB_GNU_UNIQUE 10
++#define STB_HIOS 12
++#define STB_LOPROC 13
++#define STB_HIPROC 15
++
++#define STT_NOTYPE 0
++#define STT_OBJECT 1
++#define STT_FUNC 2
++#define STT_SECTION 3
++#define STT_FILE 4
++#define STT_COMMON 5
++#define STT_TLS 6
++#define STT_NUM 7
++#define STT_LOOS 10
++#define STT_GNU_IFUNC 10
++#define STT_HIOS 12
++#define STT_LOPROC 13
++#define STT_HIPROC 15
++
++#define STN_UNDEF 0
++
++#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
++#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
++
++#define STV_DEFAULT 0
++#define STV_INTERNAL 1
++#define STV_HIDDEN 2
++#define STV_PROTECTED 3
++
++
++
++
++typedef struct
++{
++ Elf32_Addr r_offset;
++ Elf32_Word r_info;
++} Elf32_Rel;
++
++typedef struct {
++ Elf64_Addr r_offset;
++ Elf64_Xword r_info;
++} Elf64_Rel;
++
++
++
++typedef struct {
++ Elf32_Addr r_offset;
++ Elf32_Word r_info;
++ Elf32_Sword r_addend;
++} Elf32_Rela;
++
++typedef struct {
++ Elf64_Addr r_offset;
++ Elf64_Xword r_info;
++ Elf64_Sxword r_addend;
++} Elf64_Rela;
++
++
++
++#define ELF32_R_SYM(val) ((val) >> 8)
++#define ELF32_R_TYPE(val) ((val) & 0xff)
++#define ELF32_R_INFO(sym, type) (((sym) << 8) + ((type) & 0xff))
++
++#define ELF64_R_SYM(i) ((i) >> 32)
++#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
++#define ELF64_R_INFO(sym,type) ((((Elf64_Xword) (sym)) << 32) + (type))
++
++
++
++typedef struct {
++ Elf32_Word p_type;
++ Elf32_Off p_offset;
++ Elf32_Addr p_vaddr;
++ Elf32_Addr p_paddr;
++ Elf32_Word p_filesz;
++ Elf32_Word p_memsz;
++ Elf32_Word p_flags;
++ Elf32_Word p_align;
++} Elf32_Phdr;
++
++typedef struct {
++ Elf64_Word p_type;
++ Elf64_Word p_flags;
++ Elf64_Off p_offset;
++ Elf64_Addr p_vaddr;
++ Elf64_Addr p_paddr;
++ Elf64_Xword p_filesz;
++ Elf64_Xword p_memsz;
++ Elf64_Xword p_align;
++} Elf64_Phdr;
++
++
++
++#define PT_NULL 0
++#define PT_LOAD 1
++#define PT_DYNAMIC 2
++#define PT_INTERP 3
++#define PT_NOTE 4
++#define PT_SHLIB 5
++#define PT_PHDR 6
++#define PT_TLS 7
++#define PT_NUM 8
++#define PT_LOOS 0x60000000
++#define PT_GNU_EH_FRAME 0x6474e550
++#define PT_GNU_STACK 0x6474e551
++#define PT_GNU_RELRO 0x6474e552
++#define PT_LOSUNW 0x6ffffffa
++#define PT_SUNWBSS 0x6ffffffa
++#define PT_SUNWSTACK 0x6ffffffb
++#define PT_HISUNW 0x6fffffff
++#define PT_HIOS 0x6fffffff
++#define PT_LOPROC 0x70000000
++#define PT_HIPROC 0x7fffffff
++
++
++#define PN_XNUM 0xffff
++
++
++#define PF_X (1 << 0)
++#define PF_W (1 << 1)
++#define PF_R (1 << 2)
++#define PF_MASKOS 0x0ff00000
++#define PF_MASKPROC 0xf0000000
++
++
++
++#define NT_PRSTATUS 1
++#define NT_FPREGSET 2
++#define NT_PRPSINFO 3
++#define NT_PRXREG 4
++#define NT_TASKSTRUCT 4
++#define NT_PLATFORM 5
++#define NT_AUXV 6
++#define NT_GWINDOWS 7
++#define NT_ASRS 8
++#define NT_PSTATUS 10
++#define NT_PSINFO 13
++#define NT_PRCRED 14
++#define NT_UTSNAME 15
++#define NT_LWPSTATUS 16
++#define NT_LWPSINFO 17
++#define NT_PRFPXREG 20
++#define NT_SIGINFO 0x53494749
++#define NT_FILE 0x46494c45
++#define NT_PRXFPREG 0x46e62b7f
++#define NT_PPC_VMX 0x100
++#define NT_PPC_SPE 0x101
++#define NT_PPC_VSX 0x102
++#define NT_386_TLS 0x200
++#define NT_386_IOPERM 0x201
++#define NT_X86_XSTATE 0x202
++#define NT_S390_HIGH_GPRS 0x300
++#define NT_S390_TIMER 0x301
++#define NT_S390_TODCMP 0x302
++#define NT_S390_TODPREG 0x303
++#define NT_S390_CTRS 0x304
++#define NT_S390_PREFIX 0x305
++#define NT_S390_LAST_BREAK 0x306
++#define NT_S390_SYSTEM_CALL 0x307
++#define NT_S390_TDB 0x308
++#define NT_ARM_VFP 0x400
++#define NT_ARM_TLS 0x401
++#define NT_ARM_HW_BREAK 0x402
++#define NT_ARM_HW_WATCH 0x403
++#define NT_METAG_CBUF 0x500
++#define NT_METAG_RPIPE 0x501
++#define NT_METAG_TLS 0x502
++#define NT_VERSION 1
++
++
++
++
++typedef struct {
++ Elf32_Sword d_tag;
++ union {
++ Elf32_Word d_val;
++ Elf32_Addr d_ptr;
++ } d_un;
++} Elf32_Dyn;
++
++typedef struct {
++ Elf64_Sxword d_tag;
++ union {
++ Elf64_Xword d_val;
++ Elf64_Addr d_ptr;
++ } d_un;
++} Elf64_Dyn;
++
++
++
++#define DT_NULL 0
++#define DT_NEEDED 1
++#define DT_PLTRELSZ 2
++#define DT_PLTGOT 3
++#define DT_HASH 4
++#define DT_STRTAB 5
++#define DT_SYMTAB 6
++#define DT_RELA 7
++#define DT_RELASZ 8
++#define DT_RELAENT 9
++#define DT_STRSZ 10
++#define DT_SYMENT 11
++#define DT_INIT 12
++#define DT_FINI 13
++#define DT_SONAME 14
++#define DT_RPATH 15
++#define DT_SYMBOLIC 16
++#define DT_REL 17
++#define DT_RELSZ 18
++#define DT_RELENT 19
++#define DT_PLTREL 20
++#define DT_DEBUG 21
++#define DT_TEXTREL 22
++#define DT_JMPREL 23
++#define DT_BIND_NOW 24
++#define DT_INIT_ARRAY 25
++#define DT_FINI_ARRAY 26
++#define DT_INIT_ARRAYSZ 27
++#define DT_FINI_ARRAYSZ 28
++#define DT_RUNPATH 29
++#define DT_FLAGS 30
++#define DT_ENCODING 32
++#define DT_PREINIT_ARRAY 32
++#define DT_PREINIT_ARRAYSZ 33
++#define DT_NUM 34
++#define DT_LOOS 0x6000000d
++#define DT_HIOS 0x6ffff000
++#define DT_LOPROC 0x70000000
++#define DT_HIPROC 0x7fffffff
++#define DT_PROCNUM DT_MIPS_NUM
++
++#define DT_VALRNGLO 0x6ffffd00
++#define DT_GNU_PRELINKED 0x6ffffdf5
++#define DT_GNU_CONFLICTSZ 0x6ffffdf6
++#define DT_GNU_LIBLISTSZ 0x6ffffdf7
++#define DT_CHECKSUM 0x6ffffdf8
++#define DT_PLTPADSZ 0x6ffffdf9
++#define DT_MOVEENT 0x6ffffdfa
++#define DT_MOVESZ 0x6ffffdfb
++#define DT_FEATURE_1 0x6ffffdfc
++#define DT_POSFLAG_1 0x6ffffdfd
++
++#define DT_SYMINSZ 0x6ffffdfe
++#define DT_SYMINENT 0x6ffffdff
++#define DT_VALRNGHI 0x6ffffdff
++#define DT_VALTAGIDX(tag) (DT_VALRNGHI - (tag))
++#define DT_VALNUM 12
++
++#define DT_ADDRRNGLO 0x6ffffe00
++#define DT_GNU_HASH 0x6ffffef5
++#define DT_TLSDESC_PLT 0x6ffffef6
++#define DT_TLSDESC_GOT 0x6ffffef7
++#define DT_GNU_CONFLICT 0x6ffffef8
++#define DT_GNU_LIBLIST 0x6ffffef9
++#define DT_CONFIG 0x6ffffefa
++#define DT_DEPAUDIT 0x6ffffefb
++#define DT_AUDIT 0x6ffffefc
++#define DT_PLTPAD 0x6ffffefd
++#define DT_MOVETAB 0x6ffffefe
++#define DT_SYMINFO 0x6ffffeff
++#define DT_ADDRRNGHI 0x6ffffeff
++#define DT_ADDRTAGIDX(tag) (DT_ADDRRNGHI - (tag))
++#define DT_ADDRNUM 11
++
++
++
++#define DT_VERSYM 0x6ffffff0
++
++#define DT_RELACOUNT 0x6ffffff9
++#define DT_RELCOUNT 0x6ffffffa
++
++
++#define DT_FLAGS_1 0x6ffffffb
++#define DT_VERDEF 0x6ffffffc
++
++#define DT_VERDEFNUM 0x6ffffffd
++#define DT_VERNEED 0x6ffffffe
++
++#define DT_VERNEEDNUM 0x6fffffff
++#define DT_VERSIONTAGIDX(tag) (DT_VERNEEDNUM - (tag))
++#define DT_VERSIONTAGNUM 16
++
++
++
++#define DT_AUXILIARY 0x7ffffffd
++#define DT_FILTER 0x7fffffff
++#define DT_EXTRATAGIDX(tag) ((Elf32_Word)-((Elf32_Sword) (tag) <<1>>1)-1)
++#define DT_EXTRANUM 3
++
++
++#define DF_ORIGIN 0x00000001
++#define DF_SYMBOLIC 0x00000002
++#define DF_TEXTREL 0x00000004
++#define DF_BIND_NOW 0x00000008
++#define DF_STATIC_TLS 0x00000010
++
++
++
++#define DF_1_NOW 0x00000001
++#define DF_1_GLOBAL 0x00000002
++#define DF_1_GROUP 0x00000004
++#define DF_1_NODELETE 0x00000008
++#define DF_1_LOADFLTR 0x00000010
++#define DF_1_INITFIRST 0x00000020
++#define DF_1_NOOPEN 0x00000040
++#define DF_1_ORIGIN 0x00000080
++#define DF_1_DIRECT 0x00000100
++#define DF_1_TRANS 0x00000200
++#define DF_1_INTERPOSE 0x00000400
++#define DF_1_NODEFLIB 0x00000800
++#define DF_1_NODUMP 0x00001000
++#define DF_1_CONFALT 0x00002000
++#define DF_1_ENDFILTEE 0x00004000
++#define DF_1_DISPRELDNE 0x00008000
++#define DF_1_DISPRELPND 0x00010000
++#define DF_1_NODIRECT 0x00020000
++#define DF_1_IGNMULDEF 0x00040000
++#define DF_1_NOKSYMS 0x00080000
++#define DF_1_NOHDR 0x00100000
++#define DF_1_EDITED 0x00200000
++#define DF_1_NORELOC 0x00400000
++#define DF_1_SYMINTPOSE 0x00800000
++#define DF_1_GLOBAUDIT 0x01000000
++#define DF_1_SINGLETON 0x02000000
++
++#define DTF_1_PARINIT 0x00000001
++#define DTF_1_CONFEXP 0x00000002
++
++
++#define DF_P1_LAZYLOAD 0x00000001
++#define DF_P1_GROUPPERM 0x00000002
++
++
++
++
++typedef struct {
++ Elf32_Half vd_version;
++ Elf32_Half vd_flags;
++ Elf32_Half vd_ndx;
++ Elf32_Half vd_cnt;
++ Elf32_Word vd_hash;
++ Elf32_Word vd_aux;
++ Elf32_Word vd_next;
++} Elf32_Verdef;
++
++typedef struct {
++ Elf64_Half vd_version;
++ Elf64_Half vd_flags;
++ Elf64_Half vd_ndx;
++ Elf64_Half vd_cnt;
++ Elf64_Word vd_hash;
++ Elf64_Word vd_aux;
++ Elf64_Word vd_next;
++} Elf64_Verdef;
++
++
++
++#define VER_DEF_NONE 0
++#define VER_DEF_CURRENT 1
++#define VER_DEF_NUM 2
++
++
++#define VER_FLG_BASE 0x1
++#define VER_FLG_WEAK 0x2
++
++
++#define VER_NDX_LOCAL 0
++#define VER_NDX_GLOBAL 1
++#define VER_NDX_LORESERVE 0xff00
++#define VER_NDX_ELIMINATE 0xff01
++
++
++
++typedef struct {
++ Elf32_Word vda_name;
++ Elf32_Word vda_next;
++} Elf32_Verdaux;
++
++typedef struct {
++ Elf64_Word vda_name;
++ Elf64_Word vda_next;
++} Elf64_Verdaux;
++
++
++
++
++typedef struct {
++ Elf32_Half vn_version;
++ Elf32_Half vn_cnt;
++ Elf32_Word vn_file;
++ Elf32_Word vn_aux;
++ Elf32_Word vn_next;
++} Elf32_Verneed;
++
++typedef struct {
++ Elf64_Half vn_version;
++ Elf64_Half vn_cnt;
++ Elf64_Word vn_file;
++ Elf64_Word vn_aux;
++ Elf64_Word vn_next;
++} Elf64_Verneed;
++
++
++
++#define VER_NEED_NONE 0
++#define VER_NEED_CURRENT 1
++#define VER_NEED_NUM 2
++
++
++
++typedef struct {
++ Elf32_Word vna_hash;
++ Elf32_Half vna_flags;
++ Elf32_Half vna_other;
++ Elf32_Word vna_name;
++ Elf32_Word vna_next;
++} Elf32_Vernaux;
++
++typedef struct {
++ Elf64_Word vna_hash;
++ Elf64_Half vna_flags;
++ Elf64_Half vna_other;
++ Elf64_Word vna_name;
++ Elf64_Word vna_next;
++} Elf64_Vernaux;
++
++
++
++#define VER_FLG_WEAK 0x2
++
++
++
++typedef struct {
++ uint32_t a_type;
++ union {
++ uint32_t a_val;
++ } a_un;
++} Elf32_auxv_t;
++
++typedef struct {
++ uint64_t a_type;
++ union {
++ uint64_t a_val;
++ } a_un;
++} Elf64_auxv_t;
++
++
++
++#define AT_NULL 0
++#define AT_IGNORE 1
++#define AT_EXECFD 2
++#define AT_PHDR 3
++#define AT_PHENT 4
++#define AT_PHNUM 5
++#define AT_PAGESZ 6
++#define AT_BASE 7
++#define AT_FLAGS 8
++#define AT_ENTRY 9
++#define AT_NOTELF 10
++#define AT_UID 11
++#define AT_EUID 12
++#define AT_GID 13
++#define AT_EGID 14
++#define AT_CLKTCK 17
++
++
++#define AT_PLATFORM 15
++#define AT_HWCAP 16
++
++
++
++
++#define AT_FPUCW 18
++
++
++#define AT_DCACHEBSIZE 19
++#define AT_ICACHEBSIZE 20
++#define AT_UCACHEBSIZE 21
++
++
++
++#define AT_IGNOREPPC 22
++
++#define AT_SECURE 23
++
++#define AT_BASE_PLATFORM 24
++
++#define AT_RANDOM 25
++
++#define AT_HWCAP2 26
++
++#define AT_EXECFN 31
++
++
++
++#define AT_SYSINFO 32
++#define AT_SYSINFO_EHDR 33
++
++
++
++#define AT_L1I_CACHESHAPE 34
++#define AT_L1D_CACHESHAPE 35
++#define AT_L2_CACHESHAPE 36
++#define AT_L3_CACHESHAPE 37
++
++
++
++
++typedef struct {
++ Elf32_Word n_namesz;
++ Elf32_Word n_descsz;
++ Elf32_Word n_type;
++} Elf32_Nhdr;
++
++typedef struct {
++ Elf64_Word n_namesz;
++ Elf64_Word n_descsz;
++ Elf64_Word n_type;
++} Elf64_Nhdr;
++
++
++
++
++#define ELF_NOTE_SOLARIS "SUNW Solaris"
++
++
++#define ELF_NOTE_GNU "GNU"
++
++
++
++
++
++#define ELF_NOTE_PAGESIZE_HINT 1
++
++
++#define NT_GNU_ABI_TAG 1
++#define ELF_NOTE_ABI NT_GNU_ABI_TAG
++
++
++
++#define ELF_NOTE_OS_LINUX 0
++#define ELF_NOTE_OS_GNU 1
++#define ELF_NOTE_OS_SOLARIS2 2
++#define ELF_NOTE_OS_FREEBSD 3
++
++#define NT_GNU_BUILD_ID 3
++#define NT_GNU_GOLD_VERSION 4
++
++
++
++typedef struct {
++ Elf32_Xword m_value;
++ Elf32_Word m_info;
++ Elf32_Word m_poffset;
++ Elf32_Half m_repeat;
++ Elf32_Half m_stride;
++} Elf32_Move;
++
++typedef struct {
++ Elf64_Xword m_value;
++ Elf64_Xword m_info;
++ Elf64_Xword m_poffset;
++ Elf64_Half m_repeat;
++ Elf64_Half m_stride;
++} Elf64_Move;
++
++
++#define ELF32_M_SYM(info) ((info) >> 8)
++#define ELF32_M_SIZE(info) ((unsigned char) (info))
++#define ELF32_M_INFO(sym, size) (((sym) << 8) + (unsigned char) (size))
++
++#define ELF64_M_SYM(info) ELF32_M_SYM (info)
++#define ELF64_M_SIZE(info) ELF32_M_SIZE (info)
++#define ELF64_M_INFO(sym, size) ELF32_M_INFO (sym, size)
++
++#define EF_CPU32 0x00810000
++
++#define R_68K_NONE 0
++#define R_68K_32 1
++#define R_68K_16 2
++#define R_68K_8 3
++#define R_68K_PC32 4
++#define R_68K_PC16 5
++#define R_68K_PC8 6
++#define R_68K_GOT32 7
++#define R_68K_GOT16 8
++#define R_68K_GOT8 9
++#define R_68K_GOT32O 10
++#define R_68K_GOT16O 11
++#define R_68K_GOT8O 12
++#define R_68K_PLT32 13
++#define R_68K_PLT16 14
++#define R_68K_PLT8 15
++#define R_68K_PLT32O 16
++#define R_68K_PLT16O 17
++#define R_68K_PLT8O 18
++#define R_68K_COPY 19
++#define R_68K_GLOB_DAT 20
++#define R_68K_JMP_SLOT 21
++#define R_68K_RELATIVE 22
++#define R_68K_NUM 23
++
++#define R_386_NONE 0
++#define R_386_32 1
++#define R_386_PC32 2
++#define R_386_GOT32 3
++#define R_386_PLT32 4
++#define R_386_COPY 5
++#define R_386_GLOB_DAT 6
++#define R_386_JMP_SLOT 7
++#define R_386_RELATIVE 8
++#define R_386_GOTOFF 9
++#define R_386_GOTPC 10
++#define R_386_32PLT 11
++#define R_386_TLS_TPOFF 14
++#define R_386_TLS_IE 15
++#define R_386_TLS_GOTIE 16
++#define R_386_TLS_LE 17
++#define R_386_TLS_GD 18
++#define R_386_TLS_LDM 19
++#define R_386_16 20
++#define R_386_PC16 21
++#define R_386_8 22
++#define R_386_PC8 23
++#define R_386_TLS_GD_32 24
++#define R_386_TLS_GD_PUSH 25
++#define R_386_TLS_GD_CALL 26
++#define R_386_TLS_GD_POP 27
++#define R_386_TLS_LDM_32 28
++#define R_386_TLS_LDM_PUSH 29
++#define R_386_TLS_LDM_CALL 30
++#define R_386_TLS_LDM_POP 31
++#define R_386_TLS_LDO_32 32
++#define R_386_TLS_IE_32 33
++#define R_386_TLS_LE_32 34
++#define R_386_TLS_DTPMOD32 35
++#define R_386_TLS_DTPOFF32 36
++#define R_386_TLS_TPOFF32 37
++#define R_386_SIZE32 38
++#define R_386_TLS_GOTDESC 39
++#define R_386_TLS_DESC_CALL 40
++#define R_386_TLS_DESC 41
++#define R_386_IRELATIVE 42
++#define R_386_NUM 43
++
++
++
++
++
++#define STT_SPARC_REGISTER 13
++
++
++
++#define EF_SPARCV9_MM 3
++#define EF_SPARCV9_TSO 0
++#define EF_SPARCV9_PSO 1
++#define EF_SPARCV9_RMO 2
++#define EF_SPARC_LEDATA 0x800000
++#define EF_SPARC_EXT_MASK 0xFFFF00
++#define EF_SPARC_32PLUS 0x000100
++#define EF_SPARC_SUN_US1 0x000200
++#define EF_SPARC_HAL_R1 0x000400
++#define EF_SPARC_SUN_US3 0x000800
++
++
++
++#define R_SPARC_NONE 0
++#define R_SPARC_8 1
++#define R_SPARC_16 2
++#define R_SPARC_32 3
++#define R_SPARC_DISP8 4
++#define R_SPARC_DISP16 5
++#define R_SPARC_DISP32 6
++#define R_SPARC_WDISP30 7
++#define R_SPARC_WDISP22 8
++#define R_SPARC_HI22 9
++#define R_SPARC_22 10
++#define R_SPARC_13 11
++#define R_SPARC_LO10 12
++#define R_SPARC_GOT10 13
++#define R_SPARC_GOT13 14
++#define R_SPARC_GOT22 15
++#define R_SPARC_PC10 16
++#define R_SPARC_PC22 17
++#define R_SPARC_WPLT30 18
++#define R_SPARC_COPY 19
++#define R_SPARC_GLOB_DAT 20
++#define R_SPARC_JMP_SLOT 21
++#define R_SPARC_RELATIVE 22
++#define R_SPARC_UA32 23
++
++
++
++#define R_SPARC_PLT32 24
++#define R_SPARC_HIPLT22 25
++#define R_SPARC_LOPLT10 26
++#define R_SPARC_PCPLT32 27
++#define R_SPARC_PCPLT22 28
++#define R_SPARC_PCPLT10 29
++#define R_SPARC_10 30
++#define R_SPARC_11 31
++#define R_SPARC_64 32
++#define R_SPARC_OLO10 33
++#define R_SPARC_HH22 34
++#define R_SPARC_HM10 35
++#define R_SPARC_LM22 36
++#define R_SPARC_PC_HH22 37
++#define R_SPARC_PC_HM10 38
++#define R_SPARC_PC_LM22 39
++#define R_SPARC_WDISP16 40
++#define R_SPARC_WDISP19 41
++#define R_SPARC_GLOB_JMP 42
++#define R_SPARC_7 43
++#define R_SPARC_5 44
++#define R_SPARC_6 45
++#define R_SPARC_DISP64 46
++#define R_SPARC_PLT64 47
++#define R_SPARC_HIX22 48
++#define R_SPARC_LOX10 49
++#define R_SPARC_H44 50
++#define R_SPARC_M44 51
++#define R_SPARC_L44 52
++#define R_SPARC_REGISTER 53
++#define R_SPARC_UA64 54
++#define R_SPARC_UA16 55
++#define R_SPARC_TLS_GD_HI22 56
++#define R_SPARC_TLS_GD_LO10 57
++#define R_SPARC_TLS_GD_ADD 58
++#define R_SPARC_TLS_GD_CALL 59
++#define R_SPARC_TLS_LDM_HI22 60
++#define R_SPARC_TLS_LDM_LO10 61
++#define R_SPARC_TLS_LDM_ADD 62
++#define R_SPARC_TLS_LDM_CALL 63
++#define R_SPARC_TLS_LDO_HIX22 64
++#define R_SPARC_TLS_LDO_LOX10 65
++#define R_SPARC_TLS_LDO_ADD 66
++#define R_SPARC_TLS_IE_HI22 67
++#define R_SPARC_TLS_IE_LO10 68
++#define R_SPARC_TLS_IE_LD 69
++#define R_SPARC_TLS_IE_LDX 70
++#define R_SPARC_TLS_IE_ADD 71
++#define R_SPARC_TLS_LE_HIX22 72
++#define R_SPARC_TLS_LE_LOX10 73
++#define R_SPARC_TLS_DTPMOD32 74
++#define R_SPARC_TLS_DTPMOD64 75
++#define R_SPARC_TLS_DTPOFF32 76
++#define R_SPARC_TLS_DTPOFF64 77
++#define R_SPARC_TLS_TPOFF32 78
++#define R_SPARC_TLS_TPOFF64 79
++#define R_SPARC_GOTDATA_HIX22 80
++#define R_SPARC_GOTDATA_LOX10 81
++#define R_SPARC_GOTDATA_OP_HIX22 82
++#define R_SPARC_GOTDATA_OP_LOX10 83
++#define R_SPARC_GOTDATA_OP 84
++#define R_SPARC_H34 85
++#define R_SPARC_SIZE32 86
++#define R_SPARC_SIZE64 87
++#define R_SPARC_GNU_VTINHERIT 250
++#define R_SPARC_GNU_VTENTRY 251
++#define R_SPARC_REV32 252
++
++#define R_SPARC_NUM 253
++
++
++
++#define DT_SPARC_REGISTER 0x70000001
++#define DT_SPARC_NUM 2
++
++
++#define EF_MIPS_NOREORDER 1
++#define EF_MIPS_PIC 2
++#define EF_MIPS_CPIC 4
++#define EF_MIPS_XGOT 8
++#define EF_MIPS_64BIT_WHIRL 16
++#define EF_MIPS_ABI2 32
++#define EF_MIPS_ABI_ON32 64
++#define EF_MIPS_ARCH 0xf0000000
++
++
++
++#define EF_MIPS_ARCH_1 0x00000000
++#define EF_MIPS_ARCH_2 0x10000000
++#define EF_MIPS_ARCH_3 0x20000000
++#define EF_MIPS_ARCH_4 0x30000000
++#define EF_MIPS_ARCH_5 0x40000000
++#define EF_MIPS_ARCH_32 0x50000000
++#define EF_MIPS_ARCH_64 0x60000000
++#define EF_MIPS_ARCH_32R2 0x70000000
++#define EF_MIPS_ARCH_64R2 0x80000000
++
++
++#define E_MIPS_ARCH_1 0x00000000
++#define E_MIPS_ARCH_2 0x10000000
++#define E_MIPS_ARCH_3 0x20000000
++#define E_MIPS_ARCH_4 0x30000000
++#define E_MIPS_ARCH_5 0x40000000
++#define E_MIPS_ARCH_32 0x50000000
++#define E_MIPS_ARCH_64 0x60000000
++
++
++
++#define SHN_MIPS_ACOMMON 0xff00
++#define SHN_MIPS_TEXT 0xff01
++#define SHN_MIPS_DATA 0xff02
++#define SHN_MIPS_SCOMMON 0xff03
++#define SHN_MIPS_SUNDEFINED 0xff04
++
++
++
++#define SHT_MIPS_LIBLIST 0x70000000
++#define SHT_MIPS_MSYM 0x70000001
++#define SHT_MIPS_CONFLICT 0x70000002
++#define SHT_MIPS_GPTAB 0x70000003
++#define SHT_MIPS_UCODE 0x70000004
++#define SHT_MIPS_DEBUG 0x70000005
++#define SHT_MIPS_REGINFO 0x70000006
++#define SHT_MIPS_PACKAGE 0x70000007
++#define SHT_MIPS_PACKSYM 0x70000008
++#define SHT_MIPS_RELD 0x70000009
++#define SHT_MIPS_IFACE 0x7000000b
++#define SHT_MIPS_CONTENT 0x7000000c
++#define SHT_MIPS_OPTIONS 0x7000000d
++#define SHT_MIPS_SHDR 0x70000010
++#define SHT_MIPS_FDESC 0x70000011
++#define SHT_MIPS_EXTSYM 0x70000012
++#define SHT_MIPS_DENSE 0x70000013
++#define SHT_MIPS_PDESC 0x70000014
++#define SHT_MIPS_LOCSYM 0x70000015
++#define SHT_MIPS_AUXSYM 0x70000016
++#define SHT_MIPS_OPTSYM 0x70000017
++#define SHT_MIPS_LOCSTR 0x70000018
++#define SHT_MIPS_LINE 0x70000019
++#define SHT_MIPS_RFDESC 0x7000001a
++#define SHT_MIPS_DELTASYM 0x7000001b
++#define SHT_MIPS_DELTAINST 0x7000001c
++#define SHT_MIPS_DELTACLASS 0x7000001d
++#define SHT_MIPS_DWARF 0x7000001e
++#define SHT_MIPS_DELTADECL 0x7000001f
++#define SHT_MIPS_SYMBOL_LIB 0x70000020
++#define SHT_MIPS_EVENTS 0x70000021
++#define SHT_MIPS_TRANSLATE 0x70000022
++#define SHT_MIPS_PIXIE 0x70000023
++#define SHT_MIPS_XLATE 0x70000024
++#define SHT_MIPS_XLATE_DEBUG 0x70000025
++#define SHT_MIPS_WHIRL 0x70000026
++#define SHT_MIPS_EH_REGION 0x70000027
++#define SHT_MIPS_XLATE_OLD 0x70000028
++#define SHT_MIPS_PDR_EXCEPTION 0x70000029
++
++
++
++#define SHF_MIPS_GPREL 0x10000000
++#define SHF_MIPS_MERGE 0x20000000
++#define SHF_MIPS_ADDR 0x40000000
++#define SHF_MIPS_STRINGS 0x80000000
++#define SHF_MIPS_NOSTRIP 0x08000000
++#define SHF_MIPS_LOCAL 0x04000000
++#define SHF_MIPS_NAMES 0x02000000
++#define SHF_MIPS_NODUPE 0x01000000
++
++
++
++
++
++#define STO_MIPS_DEFAULT 0x0
++#define STO_MIPS_INTERNAL 0x1
++#define STO_MIPS_HIDDEN 0x2
++#define STO_MIPS_PROTECTED 0x3
++#define STO_MIPS_PLT 0x8
++#define STO_MIPS_SC_ALIGN_UNUSED 0xff
++
++
++#define STB_MIPS_SPLIT_COMMON 13
++
++
++
++typedef union {
++ struct {
++ Elf32_Word gt_current_g_value;
++ Elf32_Word gt_unused;
++ } gt_header;
++ struct {
++ Elf32_Word gt_g_value;
++ Elf32_Word gt_bytes;
++ } gt_entry;
++} Elf32_gptab;
++
++
++
++typedef struct {
++ Elf32_Word ri_gprmask;
++ Elf32_Word ri_cprmask[4];
++ Elf32_Sword ri_gp_value;
++} Elf32_RegInfo;
++
++
++
++typedef struct {
++ unsigned char kind;
++
++ unsigned char size;
++ Elf32_Section section;
++
++ Elf32_Word info;
++} Elf_Options;
++
++
++
++#define ODK_NULL 0
++#define ODK_REGINFO 1
++#define ODK_EXCEPTIONS 2
++#define ODK_PAD 3
++#define ODK_HWPATCH 4
++#define ODK_FILL 5
++#define ODK_TAGS 6
++#define ODK_HWAND 7
++#define ODK_HWOR 8
++
++
++
++#define OEX_FPU_MIN 0x1f
++#define OEX_FPU_MAX 0x1f00
++#define OEX_PAGE0 0x10000
++#define OEX_SMM 0x20000
++#define OEX_FPDBUG 0x40000
++#define OEX_PRECISEFP OEX_FPDBUG
++#define OEX_DISMISS 0x80000
++
++#define OEX_FPU_INVAL 0x10
++#define OEX_FPU_DIV0 0x08
++#define OEX_FPU_OFLO 0x04
++#define OEX_FPU_UFLO 0x02
++#define OEX_FPU_INEX 0x01
++
++
++
++#define OHW_R4KEOP 0x1
++#define OHW_R8KPFETCH 0x2
++#define OHW_R5KEOP 0x4
++#define OHW_R5KCVTL 0x8
++
++#define OPAD_PREFIX 0x1
++#define OPAD_POSTFIX 0x2
++#define OPAD_SYMBOL 0x4
++
++
++
++typedef struct {
++ Elf32_Word hwp_flags1;
++ Elf32_Word hwp_flags2;
++} Elf_Options_Hw;
++
++
++
++#define OHWA0_R4KEOP_CHECKED 0x00000001
++#define OHWA1_R4KEOP_CLEAN 0x00000002
++
++
++
++#define R_MIPS_NONE 0
++#define R_MIPS_16 1
++#define R_MIPS_32 2
++#define R_MIPS_REL32 3
++#define R_MIPS_26 4
++#define R_MIPS_HI16 5
++#define R_MIPS_LO16 6
++#define R_MIPS_GPREL16 7
++#define R_MIPS_LITERAL 8
++#define R_MIPS_GOT16 9
++#define R_MIPS_PC16 10
++#define R_MIPS_CALL16 11
++#define R_MIPS_GPREL32 12
++
++#define R_MIPS_SHIFT5 16
++#define R_MIPS_SHIFT6 17
++#define R_MIPS_64 18
++#define R_MIPS_GOT_DISP 19
++#define R_MIPS_GOT_PAGE 20
++#define R_MIPS_GOT_OFST 21
++#define R_MIPS_GOT_HI16 22
++#define R_MIPS_GOT_LO16 23
++#define R_MIPS_SUB 24
++#define R_MIPS_INSERT_A 25
++#define R_MIPS_INSERT_B 26
++#define R_MIPS_DELETE 27
++#define R_MIPS_HIGHER 28
++#define R_MIPS_HIGHEST 29
++#define R_MIPS_CALL_HI16 30
++#define R_MIPS_CALL_LO16 31
++#define R_MIPS_SCN_DISP 32
++#define R_MIPS_REL16 33
++#define R_MIPS_ADD_IMMEDIATE 34
++#define R_MIPS_PJUMP 35
++#define R_MIPS_RELGOT 36
++#define R_MIPS_JALR 37
++#define R_MIPS_TLS_DTPMOD32 38
++#define R_MIPS_TLS_DTPREL32 39
++#define R_MIPS_TLS_DTPMOD64 40
++#define R_MIPS_TLS_DTPREL64 41
++#define R_MIPS_TLS_GD 42
++#define R_MIPS_TLS_LDM 43
++#define R_MIPS_TLS_DTPREL_HI16 44
++#define R_MIPS_TLS_DTPREL_LO16 45
++#define R_MIPS_TLS_GOTTPREL 46
++#define R_MIPS_TLS_TPREL32 47
++#define R_MIPS_TLS_TPREL64 48
++#define R_MIPS_TLS_TPREL_HI16 49
++#define R_MIPS_TLS_TPREL_LO16 50
++#define R_MIPS_GLOB_DAT 51
++#define R_MIPS_COPY 126
++#define R_MIPS_JUMP_SLOT 127
++
++#define R_MIPS_NUM 128
++
++
++
++#define PT_MIPS_REGINFO 0x70000000
++#define PT_MIPS_RTPROC 0x70000001
++#define PT_MIPS_OPTIONS 0x70000002
++
++
++
++#define PF_MIPS_LOCAL 0x10000000
++
++
++
++#define DT_MIPS_RLD_VERSION 0x70000001
++#define DT_MIPS_TIME_STAMP 0x70000002
++#define DT_MIPS_ICHECKSUM 0x70000003
++#define DT_MIPS_IVERSION 0x70000004
++#define DT_MIPS_FLAGS 0x70000005
++#define DT_MIPS_BASE_ADDRESS 0x70000006
++#define DT_MIPS_MSYM 0x70000007
++#define DT_MIPS_CONFLICT 0x70000008
++#define DT_MIPS_LIBLIST 0x70000009
++#define DT_MIPS_LOCAL_GOTNO 0x7000000a
++#define DT_MIPS_CONFLICTNO 0x7000000b
++#define DT_MIPS_LIBLISTNO 0x70000010
++#define DT_MIPS_SYMTABNO 0x70000011
++#define DT_MIPS_UNREFEXTNO 0x70000012
++#define DT_MIPS_GOTSYM 0x70000013
++#define DT_MIPS_HIPAGENO 0x70000014
++#define DT_MIPS_RLD_MAP 0x70000016
++#define DT_MIPS_DELTA_CLASS 0x70000017
++#define DT_MIPS_DELTA_CLASS_NO 0x70000018
++
++#define DT_MIPS_DELTA_INSTANCE 0x70000019
++#define DT_MIPS_DELTA_INSTANCE_NO 0x7000001a
++
++#define DT_MIPS_DELTA_RELOC 0x7000001b
++#define DT_MIPS_DELTA_RELOC_NO 0x7000001c
++
++#define DT_MIPS_DELTA_SYM 0x7000001d
++
++#define DT_MIPS_DELTA_SYM_NO 0x7000001e
++
++#define DT_MIPS_DELTA_CLASSSYM 0x70000020
++
++#define DT_MIPS_DELTA_CLASSSYM_NO 0x70000021
++
++#define DT_MIPS_CXX_FLAGS 0x70000022
++#define DT_MIPS_PIXIE_INIT 0x70000023
++#define DT_MIPS_SYMBOL_LIB 0x70000024
++#define DT_MIPS_LOCALPAGE_GOTIDX 0x70000025
++#define DT_MIPS_LOCAL_GOTIDX 0x70000026
++#define DT_MIPS_HIDDEN_GOTIDX 0x70000027
++#define DT_MIPS_PROTECTED_GOTIDX 0x70000028
++#define DT_MIPS_OPTIONS 0x70000029
++#define DT_MIPS_INTERFACE 0x7000002a
++#define DT_MIPS_DYNSTR_ALIGN 0x7000002b
++#define DT_MIPS_INTERFACE_SIZE 0x7000002c
++#define DT_MIPS_RLD_TEXT_RESOLVE_ADDR 0x7000002d
++
++#define DT_MIPS_PERF_SUFFIX 0x7000002e
++
++#define DT_MIPS_COMPACT_SIZE 0x7000002f
++#define DT_MIPS_GP_VALUE 0x70000030
++#define DT_MIPS_AUX_DYNAMIC 0x70000031
++
++#define DT_MIPS_PLTGOT 0x70000032
++
++#define DT_MIPS_RWPLT 0x70000034
++#define DT_MIPS_NUM 0x35
++
++
++
++#define RHF_NONE 0
++#define RHF_QUICKSTART (1 << 0)
++#define RHF_NOTPOT (1 << 1)
++#define RHF_NO_LIBRARY_REPLACEMENT (1 << 2)
++#define RHF_NO_MOVE (1 << 3)
++#define RHF_SGI_ONLY (1 << 4)
++#define RHF_GUARANTEE_INIT (1 << 5)
++#define RHF_DELTA_C_PLUS_PLUS (1 << 6)
++#define RHF_GUARANTEE_START_INIT (1 << 7)
++#define RHF_PIXIE (1 << 8)
++#define RHF_DEFAULT_DELAY_LOAD (1 << 9)
++#define RHF_REQUICKSTART (1 << 10)
++#define RHF_REQUICKSTARTED (1 << 11)
++#define RHF_CORD (1 << 12)
++#define RHF_NO_UNRES_UNDEF (1 << 13)
++#define RHF_RLD_ORDER_SAFE (1 << 14)
++
++
++
++typedef struct
++{
++ Elf32_Word l_name;
++ Elf32_Word l_time_stamp;
++ Elf32_Word l_checksum;
++ Elf32_Word l_version;
++ Elf32_Word l_flags;
++} Elf32_Lib;
++
++typedef struct
++{
++ Elf64_Word l_name;
++ Elf64_Word l_time_stamp;
++ Elf64_Word l_checksum;
++ Elf64_Word l_version;
++ Elf64_Word l_flags;
++} Elf64_Lib;
++
++
++
++
++#define LL_NONE 0
++#define LL_EXACT_MATCH (1 << 0)
++#define LL_IGNORE_INT_VER (1 << 1)
++#define LL_REQUIRE_MINOR (1 << 2)
++#define LL_EXPORTS (1 << 3)
++#define LL_DELAY_LOAD (1 << 4)
++#define LL_DELTA (1 << 5)
++
++
++
++typedef Elf32_Addr Elf32_Conflict;
++
++
++
++
++
++
++#define EF_PARISC_TRAPNIL 0x00010000
++#define EF_PARISC_EXT 0x00020000
++#define EF_PARISC_LSB 0x00040000
++#define EF_PARISC_WIDE 0x00080000
++#define EF_PARISC_NO_KABP 0x00100000
++
++#define EF_PARISC_LAZYSWAP 0x00400000
++#define EF_PARISC_ARCH 0x0000ffff
++
++
++
++#define EFA_PARISC_1_0 0x020b
++#define EFA_PARISC_1_1 0x0210
++#define EFA_PARISC_2_0 0x0214
++
++
++
++#define SHN_PARISC_ANSI_COMMON 0xff00
++
++#define SHN_PARISC_HUGE_COMMON 0xff01
++
++
++
++#define SHT_PARISC_EXT 0x70000000
++#define SHT_PARISC_UNWIND 0x70000001
++#define SHT_PARISC_DOC 0x70000002
++
++
++
++#define SHF_PARISC_SHORT 0x20000000
++#define SHF_PARISC_HUGE 0x40000000
++#define SHF_PARISC_SBP 0x80000000
++
++
++
++#define STT_PARISC_MILLICODE 13
++
++#define STT_HP_OPAQUE (STT_LOOS + 0x1)
++#define STT_HP_STUB (STT_LOOS + 0x2)
++
++
++
++#define R_PARISC_NONE 0
++#define R_PARISC_DIR32 1
++#define R_PARISC_DIR21L 2
++#define R_PARISC_DIR17R 3
++#define R_PARISC_DIR17F 4
++#define R_PARISC_DIR14R 6
++#define R_PARISC_PCREL32 9
++#define R_PARISC_PCREL21L 10
++#define R_PARISC_PCREL17R 11
++#define R_PARISC_PCREL17F 12
++#define R_PARISC_PCREL14R 14
++#define R_PARISC_DPREL21L 18
++#define R_PARISC_DPREL14R 22
++#define R_PARISC_GPREL21L 26
++#define R_PARISC_GPREL14R 30
++#define R_PARISC_LTOFF21L 34
++#define R_PARISC_LTOFF14R 38
++#define R_PARISC_SECREL32 41
++#define R_PARISC_SEGBASE 48
++#define R_PARISC_SEGREL32 49
++#define R_PARISC_PLTOFF21L 50
++#define R_PARISC_PLTOFF14R 54
++#define R_PARISC_LTOFF_FPTR32 57
++#define R_PARISC_LTOFF_FPTR21L 58
++#define R_PARISC_LTOFF_FPTR14R 62
++#define R_PARISC_FPTR64 64
++#define R_PARISC_PLABEL32 65
++#define R_PARISC_PLABEL21L 66
++#define R_PARISC_PLABEL14R 70
++#define R_PARISC_PCREL64 72
++#define R_PARISC_PCREL22F 74
++#define R_PARISC_PCREL14WR 75
++#define R_PARISC_PCREL14DR 76
++#define R_PARISC_PCREL16F 77
++#define R_PARISC_PCREL16WF 78
++#define R_PARISC_PCREL16DF 79
++#define R_PARISC_DIR64 80
++#define R_PARISC_DIR14WR 83
++#define R_PARISC_DIR14DR 84
++#define R_PARISC_DIR16F 85
++#define R_PARISC_DIR16WF 86
++#define R_PARISC_DIR16DF 87
++#define R_PARISC_GPREL64 88
++#define R_PARISC_GPREL14WR 91
++#define R_PARISC_GPREL14DR 92
++#define R_PARISC_GPREL16F 93
++#define R_PARISC_GPREL16WF 94
++#define R_PARISC_GPREL16DF 95
++#define R_PARISC_LTOFF64 96
++#define R_PARISC_LTOFF14WR 99
++#define R_PARISC_LTOFF14DR 100
++#define R_PARISC_LTOFF16F 101
++#define R_PARISC_LTOFF16WF 102
++#define R_PARISC_LTOFF16DF 103
++#define R_PARISC_SECREL64 104
++#define R_PARISC_SEGREL64 112
++#define R_PARISC_PLTOFF14WR 115
++#define R_PARISC_PLTOFF14DR 116
++#define R_PARISC_PLTOFF16F 117
++#define R_PARISC_PLTOFF16WF 118
++#define R_PARISC_PLTOFF16DF 119
++#define R_PARISC_LTOFF_FPTR64 120
++#define R_PARISC_LTOFF_FPTR14WR 123
++#define R_PARISC_LTOFF_FPTR14DR 124
++#define R_PARISC_LTOFF_FPTR16F 125
++#define R_PARISC_LTOFF_FPTR16WF 126
++#define R_PARISC_LTOFF_FPTR16DF 127
++#define R_PARISC_LORESERVE 128
++#define R_PARISC_COPY 128
++#define R_PARISC_IPLT 129
++#define R_PARISC_EPLT 130
++#define R_PARISC_TPREL32 153
++#define R_PARISC_TPREL21L 154
++#define R_PARISC_TPREL14R 158
++#define R_PARISC_LTOFF_TP21L 162
++#define R_PARISC_LTOFF_TP14R 166
++#define R_PARISC_LTOFF_TP14F 167
++#define R_PARISC_TPREL64 216
++#define R_PARISC_TPREL14WR 219
++#define R_PARISC_TPREL14DR 220
++#define R_PARISC_TPREL16F 221
++#define R_PARISC_TPREL16WF 222
++#define R_PARISC_TPREL16DF 223
++#define R_PARISC_LTOFF_TP64 224
++#define R_PARISC_LTOFF_TP14WR 227
++#define R_PARISC_LTOFF_TP14DR 228
++#define R_PARISC_LTOFF_TP16F 229
++#define R_PARISC_LTOFF_TP16WF 230
++#define R_PARISC_LTOFF_TP16DF 231
++#define R_PARISC_GNU_VTENTRY 232
++#define R_PARISC_GNU_VTINHERIT 233
++#define R_PARISC_TLS_GD21L 234
++#define R_PARISC_TLS_GD14R 235
++#define R_PARISC_TLS_GDCALL 236
++#define R_PARISC_TLS_LDM21L 237
++#define R_PARISC_TLS_LDM14R 238
++#define R_PARISC_TLS_LDMCALL 239
++#define R_PARISC_TLS_LDO21L 240
++#define R_PARISC_TLS_LDO14R 241
++#define R_PARISC_TLS_DTPMOD32 242
++#define R_PARISC_TLS_DTPMOD64 243
++#define R_PARISC_TLS_DTPOFF32 244
++#define R_PARISC_TLS_DTPOFF64 245
++#define R_PARISC_TLS_LE21L R_PARISC_TPREL21L
++#define R_PARISC_TLS_LE14R R_PARISC_TPREL14R
++#define R_PARISC_TLS_IE21L R_PARISC_LTOFF_TP21L
++#define R_PARISC_TLS_IE14R R_PARISC_LTOFF_TP14R
++#define R_PARISC_TLS_TPREL32 R_PARISC_TPREL32
++#define R_PARISC_TLS_TPREL64 R_PARISC_TPREL64
++#define R_PARISC_HIRESERVE 255
++
++
++
++#define PT_HP_TLS (PT_LOOS + 0x0)
++#define PT_HP_CORE_NONE (PT_LOOS + 0x1)
++#define PT_HP_CORE_VERSION (PT_LOOS + 0x2)
++#define PT_HP_CORE_KERNEL (PT_LOOS + 0x3)
++#define PT_HP_CORE_COMM (PT_LOOS + 0x4)
++#define PT_HP_CORE_PROC (PT_LOOS + 0x5)
++#define PT_HP_CORE_LOADABLE (PT_LOOS + 0x6)
++#define PT_HP_CORE_STACK (PT_LOOS + 0x7)
++#define PT_HP_CORE_SHM (PT_LOOS + 0x8)
++#define PT_HP_CORE_MMF (PT_LOOS + 0x9)
++#define PT_HP_PARALLEL (PT_LOOS + 0x10)
++#define PT_HP_FASTBIND (PT_LOOS + 0x11)
++#define PT_HP_OPT_ANNOT (PT_LOOS + 0x12)
++#define PT_HP_HSL_ANNOT (PT_LOOS + 0x13)
++#define PT_HP_STACK (PT_LOOS + 0x14)
++
++#define PT_PARISC_ARCHEXT 0x70000000
++#define PT_PARISC_UNWIND 0x70000001
++
++
++
++#define PF_PARISC_SBP 0x08000000
++
++#define PF_HP_PAGE_SIZE 0x00100000
++#define PF_HP_FAR_SHARED 0x00200000
++#define PF_HP_NEAR_SHARED 0x00400000
++#define PF_HP_CODE 0x01000000
++#define PF_HP_MODIFY 0x02000000
++#define PF_HP_LAZYSWAP 0x04000000
++#define PF_HP_SBP 0x08000000
++
++
++
++
++
++
++#define EF_ALPHA_32BIT 1
++#define EF_ALPHA_CANRELAX 2
++
++
++
++
++#define SHT_ALPHA_DEBUG 0x70000001
++#define SHT_ALPHA_REGINFO 0x70000002
++
++
++
++#define SHF_ALPHA_GPREL 0x10000000
++
++
++#define STO_ALPHA_NOPV 0x80
++#define STO_ALPHA_STD_GPLOAD 0x88
++
++
++
++#define R_ALPHA_NONE 0
++#define R_ALPHA_REFLONG 1
++#define R_ALPHA_REFQUAD 2
++#define R_ALPHA_GPREL32 3
++#define R_ALPHA_LITERAL 4
++#define R_ALPHA_LITUSE 5
++#define R_ALPHA_GPDISP 6
++#define R_ALPHA_BRADDR 7
++#define R_ALPHA_HINT 8
++#define R_ALPHA_SREL16 9
++#define R_ALPHA_SREL32 10
++#define R_ALPHA_SREL64 11
++#define R_ALPHA_GPRELHIGH 17
++#define R_ALPHA_GPRELLOW 18
++#define R_ALPHA_GPREL16 19
++#define R_ALPHA_COPY 24
++#define R_ALPHA_GLOB_DAT 25
++#define R_ALPHA_JMP_SLOT 26
++#define R_ALPHA_RELATIVE 27
++#define R_ALPHA_TLS_GD_HI 28
++#define R_ALPHA_TLSGD 29
++#define R_ALPHA_TLS_LDM 30
++#define R_ALPHA_DTPMOD64 31
++#define R_ALPHA_GOTDTPREL 32
++#define R_ALPHA_DTPREL64 33
++#define R_ALPHA_DTPRELHI 34
++#define R_ALPHA_DTPRELLO 35
++#define R_ALPHA_DTPREL16 36
++#define R_ALPHA_GOTTPREL 37
++#define R_ALPHA_TPREL64 38
++#define R_ALPHA_TPRELHI 39
++#define R_ALPHA_TPRELLO 40
++#define R_ALPHA_TPREL16 41
++
++#define R_ALPHA_NUM 46
++
++
++#define LITUSE_ALPHA_ADDR 0
++#define LITUSE_ALPHA_BASE 1
++#define LITUSE_ALPHA_BYTOFF 2
++#define LITUSE_ALPHA_JSR 3
++#define LITUSE_ALPHA_TLS_GD 4
++#define LITUSE_ALPHA_TLS_LDM 5
++
++
++#define DT_ALPHA_PLTRO (DT_LOPROC + 0)
++#define DT_ALPHA_NUM 1
++
++
++
++
++#define EF_PPC_EMB 0x80000000
++
++
++#define EF_PPC_RELOCATABLE 0x00010000
++#define EF_PPC_RELOCATABLE_LIB 0x00008000
++
++
++
++#define R_PPC_NONE 0
++#define R_PPC_ADDR32 1
++#define R_PPC_ADDR24 2
++#define R_PPC_ADDR16 3
++#define R_PPC_ADDR16_LO 4
++#define R_PPC_ADDR16_HI 5
++#define R_PPC_ADDR16_HA 6
++#define R_PPC_ADDR14 7
++#define R_PPC_ADDR14_BRTAKEN 8
++#define R_PPC_ADDR14_BRNTAKEN 9
++#define R_PPC_REL24 10
++#define R_PPC_REL14 11
++#define R_PPC_REL14_BRTAKEN 12
++#define R_PPC_REL14_BRNTAKEN 13
++#define R_PPC_GOT16 14
++#define R_PPC_GOT16_LO 15
++#define R_PPC_GOT16_HI 16
++#define R_PPC_GOT16_HA 17
++#define R_PPC_PLTREL24 18
++#define R_PPC_COPY 19
++#define R_PPC_GLOB_DAT 20
++#define R_PPC_JMP_SLOT 21
++#define R_PPC_RELATIVE 22
++#define R_PPC_LOCAL24PC 23
++#define R_PPC_UADDR32 24
++#define R_PPC_UADDR16 25
++#define R_PPC_REL32 26
++#define R_PPC_PLT32 27
++#define R_PPC_PLTREL32 28
++#define R_PPC_PLT16_LO 29
++#define R_PPC_PLT16_HI 30
++#define R_PPC_PLT16_HA 31
++#define R_PPC_SDAREL16 32
++#define R_PPC_SECTOFF 33
++#define R_PPC_SECTOFF_LO 34
++#define R_PPC_SECTOFF_HI 35
++#define R_PPC_SECTOFF_HA 36
++
++
++#define R_PPC_TLS 67
++#define R_PPC_DTPMOD32 68
++#define R_PPC_TPREL16 69
++#define R_PPC_TPREL16_LO 70
++#define R_PPC_TPREL16_HI 71
++#define R_PPC_TPREL16_HA 72
++#define R_PPC_TPREL32 73
++#define R_PPC_DTPREL16 74
++#define R_PPC_DTPREL16_LO 75
++#define R_PPC_DTPREL16_HI 76
++#define R_PPC_DTPREL16_HA 77
++#define R_PPC_DTPREL32 78
++#define R_PPC_GOT_TLSGD16 79
++#define R_PPC_GOT_TLSGD16_LO 80
++#define R_PPC_GOT_TLSGD16_HI 81
++#define R_PPC_GOT_TLSGD16_HA 82
++#define R_PPC_GOT_TLSLD16 83
++#define R_PPC_GOT_TLSLD16_LO 84
++#define R_PPC_GOT_TLSLD16_HI 85
++#define R_PPC_GOT_TLSLD16_HA 86
++#define R_PPC_GOT_TPREL16 87
++#define R_PPC_GOT_TPREL16_LO 88
++#define R_PPC_GOT_TPREL16_HI 89
++#define R_PPC_GOT_TPREL16_HA 90
++#define R_PPC_GOT_DTPREL16 91
++#define R_PPC_GOT_DTPREL16_LO 92
++#define R_PPC_GOT_DTPREL16_HI 93
++#define R_PPC_GOT_DTPREL16_HA 94
++
++
++
++#define R_PPC_EMB_NADDR32 101
++#define R_PPC_EMB_NADDR16 102
++#define R_PPC_EMB_NADDR16_LO 103
++#define R_PPC_EMB_NADDR16_HI 104
++#define R_PPC_EMB_NADDR16_HA 105
++#define R_PPC_EMB_SDAI16 106
++#define R_PPC_EMB_SDA2I16 107
++#define R_PPC_EMB_SDA2REL 108
++#define R_PPC_EMB_SDA21 109
++#define R_PPC_EMB_MRKREF 110
++#define R_PPC_EMB_RELSEC16 111
++#define R_PPC_EMB_RELST_LO 112
++#define R_PPC_EMB_RELST_HI 113
++#define R_PPC_EMB_RELST_HA 114
++#define R_PPC_EMB_BIT_FLD 115
++#define R_PPC_EMB_RELSDA 116
++
++
++#define R_PPC_DIAB_SDA21_LO 180
++#define R_PPC_DIAB_SDA21_HI 181
++#define R_PPC_DIAB_SDA21_HA 182
++#define R_PPC_DIAB_RELSDA_LO 183
++#define R_PPC_DIAB_RELSDA_HI 184
++#define R_PPC_DIAB_RELSDA_HA 185
++
++
++#define R_PPC_IRELATIVE 248
++
++
++#define R_PPC_REL16 249
++#define R_PPC_REL16_LO 250
++#define R_PPC_REL16_HI 251
++#define R_PPC_REL16_HA 252
++
++
++
++#define R_PPC_TOC16 255
++
++
++#define DT_PPC_GOT (DT_LOPROC + 0)
++#define DT_PPC_NUM 1
++
++
++#define R_PPC64_NONE R_PPC_NONE
++#define R_PPC64_ADDR32 R_PPC_ADDR32
++#define R_PPC64_ADDR24 R_PPC_ADDR24
++#define R_PPC64_ADDR16 R_PPC_ADDR16
++#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO
++#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI
++#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA
++#define R_PPC64_ADDR14 R_PPC_ADDR14
++#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
++#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
++#define R_PPC64_REL24 R_PPC_REL24
++#define R_PPC64_REL14 R_PPC_REL14
++#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
++#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
++#define R_PPC64_GOT16 R_PPC_GOT16
++#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
++#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
++#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
++
++#define R_PPC64_COPY R_PPC_COPY
++#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
++#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
++#define R_PPC64_RELATIVE R_PPC_RELATIVE
++
++#define R_PPC64_UADDR32 R_PPC_UADDR32
++#define R_PPC64_UADDR16 R_PPC_UADDR16
++#define R_PPC64_REL32 R_PPC_REL32
++#define R_PPC64_PLT32 R_PPC_PLT32
++#define R_PPC64_PLTREL32 R_PPC_PLTREL32
++#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
++#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
++#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
++
++#define R_PPC64_SECTOFF R_PPC_SECTOFF
++#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
++#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
++#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
++#define R_PPC64_ADDR30 37
++#define R_PPC64_ADDR64 38
++#define R_PPC64_ADDR16_HIGHER 39
++#define R_PPC64_ADDR16_HIGHERA 40
++#define R_PPC64_ADDR16_HIGHEST 41
++#define R_PPC64_ADDR16_HIGHESTA 42
++#define R_PPC64_UADDR64 43
++#define R_PPC64_REL64 44
++#define R_PPC64_PLT64 45
++#define R_PPC64_PLTREL64 46
++#define R_PPC64_TOC16 47
++#define R_PPC64_TOC16_LO 48
++#define R_PPC64_TOC16_HI 49
++#define R_PPC64_TOC16_HA 50
++#define R_PPC64_TOC 51
++#define R_PPC64_PLTGOT16 52
++#define R_PPC64_PLTGOT16_LO 53
++#define R_PPC64_PLTGOT16_HI 54
++#define R_PPC64_PLTGOT16_HA 55
++
++#define R_PPC64_ADDR16_DS 56
++#define R_PPC64_ADDR16_LO_DS 57
++#define R_PPC64_GOT16_DS 58
++#define R_PPC64_GOT16_LO_DS 59
++#define R_PPC64_PLT16_LO_DS 60
++#define R_PPC64_SECTOFF_DS 61
++#define R_PPC64_SECTOFF_LO_DS 62
++#define R_PPC64_TOC16_DS 63
++#define R_PPC64_TOC16_LO_DS 64
++#define R_PPC64_PLTGOT16_DS 65
++#define R_PPC64_PLTGOT16_LO_DS 66
++
++
++#define R_PPC64_TLS 67
++#define R_PPC64_DTPMOD64 68
++#define R_PPC64_TPREL16 69
++#define R_PPC64_TPREL16_LO 70
++#define R_PPC64_TPREL16_HI 71
++#define R_PPC64_TPREL16_HA 72
++#define R_PPC64_TPREL64 73
++#define R_PPC64_DTPREL16 74
++#define R_PPC64_DTPREL16_LO 75
++#define R_PPC64_DTPREL16_HI 76
++#define R_PPC64_DTPREL16_HA 77
++#define R_PPC64_DTPREL64 78
++#define R_PPC64_GOT_TLSGD16 79
++#define R_PPC64_GOT_TLSGD16_LO 80
++#define R_PPC64_GOT_TLSGD16_HI 81
++#define R_PPC64_GOT_TLSGD16_HA 82
++#define R_PPC64_GOT_TLSLD16 83
++#define R_PPC64_GOT_TLSLD16_LO 84
++#define R_PPC64_GOT_TLSLD16_HI 85
++#define R_PPC64_GOT_TLSLD16_HA 86
++#define R_PPC64_GOT_TPREL16_DS 87
++#define R_PPC64_GOT_TPREL16_LO_DS 88
++#define R_PPC64_GOT_TPREL16_HI 89
++#define R_PPC64_GOT_TPREL16_HA 90
++#define R_PPC64_GOT_DTPREL16_DS 91
++#define R_PPC64_GOT_DTPREL16_LO_DS 92
++#define R_PPC64_GOT_DTPREL16_HI 93
++#define R_PPC64_GOT_DTPREL16_HA 94
++#define R_PPC64_TPREL16_DS 95
++#define R_PPC64_TPREL16_LO_DS 96
++#define R_PPC64_TPREL16_HIGHER 97
++#define R_PPC64_TPREL16_HIGHERA 98
++#define R_PPC64_TPREL16_HIGHEST 99
++#define R_PPC64_TPREL16_HIGHESTA 100
++#define R_PPC64_DTPREL16_DS 101
++#define R_PPC64_DTPREL16_LO_DS 102
++#define R_PPC64_DTPREL16_HIGHER 103
++#define R_PPC64_DTPREL16_HIGHERA 104
++#define R_PPC64_DTPREL16_HIGHEST 105
++#define R_PPC64_DTPREL16_HIGHESTA 106
++
++
++#define R_PPC64_JMP_IREL 247
++#define R_PPC64_IRELATIVE 248
++#define R_PPC64_REL16 249
++#define R_PPC64_REL16_LO 250
++#define R_PPC64_REL16_HI 251
++#define R_PPC64_REL16_HA 252
++
++
++#define DT_PPC64_GLINK (DT_LOPROC + 0)
++#define DT_PPC64_OPD (DT_LOPROC + 1)
++#define DT_PPC64_OPDSZ (DT_LOPROC + 2)
++#define DT_PPC64_NUM 3
++
++
++
++
++
++#define EF_ARM_RELEXEC 0x01
++#define EF_ARM_HASENTRY 0x02
++#define EF_ARM_INTERWORK 0x04
++#define EF_ARM_APCS_26 0x08
++#define EF_ARM_APCS_FLOAT 0x10
++#define EF_ARM_PIC 0x20
++#define EF_ARM_ALIGN8 0x40
++#define EF_ARM_NEW_ABI 0x80
++#define EF_ARM_OLD_ABI 0x100
++#define EF_ARM_SOFT_FLOAT 0x200
++#define EF_ARM_VFP_FLOAT 0x400
++#define EF_ARM_MAVERICK_FLOAT 0x800
++
++#define EF_ARM_ABI_FLOAT_SOFT 0x200
++#define EF_ARM_ABI_FLOAT_HARD 0x400
++
++
++#define EF_ARM_SYMSARESORTED 0x04
++#define EF_ARM_DYNSYMSUSESEGIDX 0x08
++#define EF_ARM_MAPSYMSFIRST 0x10
++#define EF_ARM_EABIMASK 0XFF000000
++
++
++#define EF_ARM_BE8 0x00800000
++#define EF_ARM_LE8 0x00400000
++
++#define EF_ARM_EABI_VERSION(flags) ((flags) & EF_ARM_EABIMASK)
++#define EF_ARM_EABI_UNKNOWN 0x00000000
++#define EF_ARM_EABI_VER1 0x01000000
++#define EF_ARM_EABI_VER2 0x02000000
++#define EF_ARM_EABI_VER3 0x03000000
++#define EF_ARM_EABI_VER4 0x04000000
++#define EF_ARM_EABI_VER5 0x05000000
++
++
++#define STT_ARM_TFUNC STT_LOPROC
++#define STT_ARM_16BIT STT_HIPROC
++
++
++#define SHF_ARM_ENTRYSECT 0x10000000
++#define SHF_ARM_COMDEF 0x80000000
++
++
++
++#define PF_ARM_SB 0x10000000
++
++#define PF_ARM_PI 0x20000000
++#define PF_ARM_ABS 0x40000000
++
++
++#define PT_ARM_EXIDX (PT_LOPROC + 1)
++
++
++#define SHT_ARM_EXIDX (SHT_LOPROC + 1)
++#define SHT_ARM_PREEMPTMAP (SHT_LOPROC + 2)
++#define SHT_ARM_ATTRIBUTES (SHT_LOPROC + 3)
++
++
++#define R_AARCH64_NONE 0
++#define R_AARCH64_ABS64 257
++#define R_AARCH64_ABS32 258
++#define R_AARCH64_COPY 1024
++#define R_AARCH64_GLOB_DAT 1025
++#define R_AARCH64_JUMP_SLOT 1026
++#define R_AARCH64_RELATIVE 1027
++#define R_AARCH64_TLS_DTPMOD64 1028
++#define R_AARCH64_TLS_DTPREL64 1029
++#define R_AARCH64_TLS_TPREL64 1030
++#define R_AARCH64_TLSDESC 1031
++
++
++#define R_ARM_NONE 0
++#define R_ARM_PC24 1
++#define R_ARM_ABS32 2
++#define R_ARM_REL32 3
++#define R_ARM_PC13 4
++#define R_ARM_ABS16 5
++#define R_ARM_ABS12 6
++#define R_ARM_THM_ABS5 7
++#define R_ARM_ABS8 8
++#define R_ARM_SBREL32 9
++#define R_ARM_THM_PC22 10
++#define R_ARM_THM_PC8 11
++#define R_ARM_AMP_VCALL9 12
++#define R_ARM_TLS_DESC 13
++#define R_ARM_THM_SWI8 14
++#define R_ARM_XPC25 15
++#define R_ARM_THM_XPC22 16
++#define R_ARM_TLS_DTPMOD32 17
++#define R_ARM_TLS_DTPOFF32 18
++#define R_ARM_TLS_TPOFF32 19
++#define R_ARM_COPY 20
++#define R_ARM_GLOB_DAT 21
++#define R_ARM_JUMP_SLOT 22
++#define R_ARM_RELATIVE 23
++#define R_ARM_GOTOFF 24
++#define R_ARM_GOTPC 25
++#define R_ARM_GOT32 26
++#define R_ARM_PLT32 27
++#define R_ARM_CALL 28
++#define R_ARM_JUMP24 29
++#define R_ARM_THM_JUMP24 30
++#define R_ARM_BASE_ABS 31
++#define R_ARM_ALU_PCREL_7_0 32
++#define R_ARM_ALU_PCREL_15_8 33
++#define R_ARM_ALU_PCREL_23_15 34
++#define R_ARM_LDR_SBREL_11_0 35
++#define R_ARM_ALU_SBREL_19_12 36
++#define R_ARM_ALU_SBREL_27_20 37
++#define R_ARM_TARGET1 38
++#define R_ARM_SBREL31 39
++#define R_ARM_V4BX 40
++#define R_ARM_TARGET2 41
++#define R_ARM_PREL31 42
++#define R_ARM_MOVW_ABS_NC 43
++#define R_ARM_MOVT_ABS 44
++#define R_ARM_MOVW_PREL_NC 45
++#define R_ARM_MOVT_PREL 46
++#define R_ARM_THM_MOVW_ABS_NC 47
++#define R_ARM_THM_MOVT_ABS 48
++#define R_ARM_THM_MOVW_PREL_NC 49
++#define R_ARM_THM_MOVT_PREL 50
++#define R_ARM_THM_JUMP19 51
++#define R_ARM_THM_JUMP6 52
++#define R_ARM_THM_ALU_PREL_11_0 53
++#define R_ARM_THM_PC12 54
++#define R_ARM_ABS32_NOI 55
++#define R_ARM_REL32_NOI 56
++#define R_ARM_ALU_PC_G0_NC 57
++#define R_ARM_ALU_PC_G0 58
++#define R_ARM_ALU_PC_G1_NC 59
++#define R_ARM_ALU_PC_G1 60
++#define R_ARM_ALU_PC_G2 61
++#define R_ARM_LDR_PC_G1 62
++#define R_ARM_LDR_PC_G2 63
++#define R_ARM_LDRS_PC_G0 64
++#define R_ARM_LDRS_PC_G1 65
++#define R_ARM_LDRS_PC_G2 66
++#define R_ARM_LDC_PC_G0 67
++#define R_ARM_LDC_PC_G1 68
++#define R_ARM_LDC_PC_G2 69
++#define R_ARM_ALU_SB_G0_NC 70
++#define R_ARM_ALU_SB_G0 71
++#define R_ARM_ALU_SB_G1_NC 72
++#define R_ARM_ALU_SB_G1 73
++#define R_ARM_ALU_SB_G2 74
++#define R_ARM_LDR_SB_G0 75
++#define R_ARM_LDR_SB_G1 76
++#define R_ARM_LDR_SB_G2 77
++#define R_ARM_LDRS_SB_G0 78
++#define R_ARM_LDRS_SB_G1 79
++#define R_ARM_LDRS_SB_G2 80
++#define R_ARM_LDC_SB_G0 81
++#define R_ARM_LDC_SB_G1 82
++#define R_ARM_LDC_SB_G2 83
++#define R_ARM_MOVW_BREL_NC 84
++#define R_ARM_MOVT_BREL 85
++#define R_ARM_MOVW_BREL 86
++#define R_ARM_THM_MOVW_BREL_NC 87
++#define R_ARM_THM_MOVT_BREL 88
++#define R_ARM_THM_MOVW_BREL 89
++#define R_ARM_TLS_GOTDESC 90
++#define R_ARM_TLS_CALL 91
++#define R_ARM_TLS_DESCSEQ 92
++#define R_ARM_THM_TLS_CALL 93
++#define R_ARM_PLT32_ABS 94
++#define R_ARM_GOT_ABS 95
++#define R_ARM_GOT_PREL 96
++#define R_ARM_GOT_BREL12 97
++#define R_ARM_GOTOFF12 98
++#define R_ARM_GOTRELAX 99
++#define R_ARM_GNU_VTENTRY 100
++#define R_ARM_GNU_VTINHERIT 101
++#define R_ARM_THM_PC11 102
++#define R_ARM_THM_PC9 103
++#define R_ARM_TLS_GD32 104
++
++#define R_ARM_TLS_LDM32 105
++
++#define R_ARM_TLS_LDO32 106
++
++#define R_ARM_TLS_IE32 107
++
++#define R_ARM_TLS_LE32 108
++#define R_ARM_TLS_LDO12 109
++#define R_ARM_TLS_LE12 110
++#define R_ARM_TLS_IE12GP 111
++#define R_ARM_ME_TOO 128
++#define R_ARM_THM_TLS_DESCSEQ 129
++#define R_ARM_THM_TLS_DESCSEQ16 129
++#define R_ARM_THM_TLS_DESCSEQ32 130
++#define R_ARM_THM_GOT_BREL12 131
++#define R_ARM_IRELATIVE 160
++#define R_ARM_RXPC25 249
++#define R_ARM_RSBREL32 250
++#define R_ARM_THM_RPC22 251
++#define R_ARM_RREL32 252
++#define R_ARM_RABS22 253
++#define R_ARM_RPC24 254
++#define R_ARM_RBASE 255
++
++#define R_ARM_NUM 256
++
++
++
++
++#define EF_IA_64_MASKOS 0x0000000f
++#define EF_IA_64_ABI64 0x00000010
++#define EF_IA_64_ARCH 0xff000000
++
++
++#define PT_IA_64_ARCHEXT (PT_LOPROC + 0)
++#define PT_IA_64_UNWIND (PT_LOPROC + 1)
++#define PT_IA_64_HP_OPT_ANOT (PT_LOOS + 0x12)
++#define PT_IA_64_HP_HSL_ANOT (PT_LOOS + 0x13)
++#define PT_IA_64_HP_STACK (PT_LOOS + 0x14)
++
++
++#define PF_IA_64_NORECOV 0x80000000
++
++
++#define SHT_IA_64_EXT (SHT_LOPROC + 0)
++#define SHT_IA_64_UNWIND (SHT_LOPROC + 1)
++
++
++#define SHF_IA_64_SHORT 0x10000000
++#define SHF_IA_64_NORECOV 0x20000000
++
++
++#define DT_IA_64_PLT_RESERVE (DT_LOPROC + 0)
++#define DT_IA_64_NUM 1
++
++
++#define R_IA64_NONE 0x00
++#define R_IA64_IMM14 0x21
++#define R_IA64_IMM22 0x22
++#define R_IA64_IMM64 0x23
++#define R_IA64_DIR32MSB 0x24
++#define R_IA64_DIR32LSB 0x25
++#define R_IA64_DIR64MSB 0x26
++#define R_IA64_DIR64LSB 0x27
++#define R_IA64_GPREL22 0x2a
++#define R_IA64_GPREL64I 0x2b
++#define R_IA64_GPREL32MSB 0x2c
++#define R_IA64_GPREL32LSB 0x2d
++#define R_IA64_GPREL64MSB 0x2e
++#define R_IA64_GPREL64LSB 0x2f
++#define R_IA64_LTOFF22 0x32
++#define R_IA64_LTOFF64I 0x33
++#define R_IA64_PLTOFF22 0x3a
++#define R_IA64_PLTOFF64I 0x3b
++#define R_IA64_PLTOFF64MSB 0x3e
++#define R_IA64_PLTOFF64LSB 0x3f
++#define R_IA64_FPTR64I 0x43
++#define R_IA64_FPTR32MSB 0x44
++#define R_IA64_FPTR32LSB 0x45
++#define R_IA64_FPTR64MSB 0x46
++#define R_IA64_FPTR64LSB 0x47
++#define R_IA64_PCREL60B 0x48
++#define R_IA64_PCREL21B 0x49
++#define R_IA64_PCREL21M 0x4a
++#define R_IA64_PCREL21F 0x4b
++#define R_IA64_PCREL32MSB 0x4c
++#define R_IA64_PCREL32LSB 0x4d
++#define R_IA64_PCREL64MSB 0x4e
++#define R_IA64_PCREL64LSB 0x4f
++#define R_IA64_LTOFF_FPTR22 0x52
++#define R_IA64_LTOFF_FPTR64I 0x53
++#define R_IA64_LTOFF_FPTR32MSB 0x54
++#define R_IA64_LTOFF_FPTR32LSB 0x55
++#define R_IA64_LTOFF_FPTR64MSB 0x56
++#define R_IA64_LTOFF_FPTR64LSB 0x57
++#define R_IA64_SEGREL32MSB 0x5c
++#define R_IA64_SEGREL32LSB 0x5d
++#define R_IA64_SEGREL64MSB 0x5e
++#define R_IA64_SEGREL64LSB 0x5f
++#define R_IA64_SECREL32MSB 0x64
++#define R_IA64_SECREL32LSB 0x65
++#define R_IA64_SECREL64MSB 0x66
++#define R_IA64_SECREL64LSB 0x67
++#define R_IA64_REL32MSB 0x6c
++#define R_IA64_REL32LSB 0x6d
++#define R_IA64_REL64MSB 0x6e
++#define R_IA64_REL64LSB 0x6f
++#define R_IA64_LTV32MSB 0x74
++#define R_IA64_LTV32LSB 0x75
++#define R_IA64_LTV64MSB 0x76
++#define R_IA64_LTV64LSB 0x77
++#define R_IA64_PCREL21BI 0x79
++#define R_IA64_PCREL22 0x7a
++#define R_IA64_PCREL64I 0x7b
++#define R_IA64_IPLTMSB 0x80
++#define R_IA64_IPLTLSB 0x81
++#define R_IA64_COPY 0x84
++#define R_IA64_SUB 0x85
++#define R_IA64_LTOFF22X 0x86
++#define R_IA64_LDXMOV 0x87
++#define R_IA64_TPREL14 0x91
++#define R_IA64_TPREL22 0x92
++#define R_IA64_TPREL64I 0x93
++#define R_IA64_TPREL64MSB 0x96
++#define R_IA64_TPREL64LSB 0x97
++#define R_IA64_LTOFF_TPREL22 0x9a
++#define R_IA64_DTPMOD64MSB 0xa6
++#define R_IA64_DTPMOD64LSB 0xa7
++#define R_IA64_LTOFF_DTPMOD22 0xaa
++#define R_IA64_DTPREL14 0xb1
++#define R_IA64_DTPREL22 0xb2
++#define R_IA64_DTPREL64I 0xb3
++#define R_IA64_DTPREL32MSB 0xb4
++#define R_IA64_DTPREL32LSB 0xb5
++#define R_IA64_DTPREL64MSB 0xb6
++#define R_IA64_DTPREL64LSB 0xb7
++#define R_IA64_LTOFF_DTPREL22 0xba
++
++
++
++
++#define R_SH_NONE 0
++#define R_SH_DIR32 1
++#define R_SH_REL32 2
++#define R_SH_DIR8WPN 3
++#define R_SH_IND12W 4
++#define R_SH_DIR8WPL 5
++#define R_SH_DIR8WPZ 6
++#define R_SH_DIR8BP 7
++#define R_SH_DIR8W 8
++#define R_SH_DIR8L 9
++#define R_SH_SWITCH16 25
++#define R_SH_SWITCH32 26
++#define R_SH_USES 27
++#define R_SH_COUNT 28
++#define R_SH_ALIGN 29
++#define R_SH_CODE 30
++#define R_SH_DATA 31
++#define R_SH_LABEL 32
++#define R_SH_SWITCH8 33
++#define R_SH_GNU_VTINHERIT 34
++#define R_SH_GNU_VTENTRY 35
++#define R_SH_TLS_GD_32 144
++#define R_SH_TLS_LD_32 145
++#define R_SH_TLS_LDO_32 146
++#define R_SH_TLS_IE_32 147
++#define R_SH_TLS_LE_32 148
++#define R_SH_TLS_DTPMOD32 149
++#define R_SH_TLS_DTPOFF32 150
++#define R_SH_TLS_TPOFF32 151
++#define R_SH_GOT32 160
++#define R_SH_PLT32 161
++#define R_SH_COPY 162
++#define R_SH_GLOB_DAT 163
++#define R_SH_JMP_SLOT 164
++#define R_SH_RELATIVE 165
++#define R_SH_GOTOFF 166
++#define R_SH_GOTPC 167
++
++#define R_SH_NUM 256
++
++
++
++#define R_390_NONE 0
++#define R_390_8 1
++#define R_390_12 2
++#define R_390_16 3
++#define R_390_32 4
++#define R_390_PC32 5
++#define R_390_GOT12 6
++#define R_390_GOT32 7
++#define R_390_PLT32 8
++#define R_390_COPY 9
++#define R_390_GLOB_DAT 10
++#define R_390_JMP_SLOT 11
++#define R_390_RELATIVE 12
++#define R_390_GOTOFF32 13
++#define R_390_GOTPC 14
++#define R_390_GOT16 15
++#define R_390_PC16 16
++#define R_390_PC16DBL 17
++#define R_390_PLT16DBL 18
++#define R_390_PC32DBL 19
++#define R_390_PLT32DBL 20
++#define R_390_GOTPCDBL 21
++#define R_390_64 22
++#define R_390_PC64 23
++#define R_390_GOT64 24
++#define R_390_PLT64 25
++#define R_390_GOTENT 26
++#define R_390_GOTOFF16 27
++#define R_390_GOTOFF64 28
++#define R_390_GOTPLT12 29
++#define R_390_GOTPLT16 30
++#define R_390_GOTPLT32 31
++#define R_390_GOTPLT64 32
++#define R_390_GOTPLTENT 33
++#define R_390_PLTOFF16 34
++#define R_390_PLTOFF32 35
++#define R_390_PLTOFF64 36
++#define R_390_TLS_LOAD 37
++#define R_390_TLS_GDCALL 38
++
++#define R_390_TLS_LDCALL 39
++
++#define R_390_TLS_GD32 40
++
++#define R_390_TLS_GD64 41
++
++#define R_390_TLS_GOTIE12 42
++
++#define R_390_TLS_GOTIE32 43
++
++#define R_390_TLS_GOTIE64 44
++
++#define R_390_TLS_LDM32 45
++
++#define R_390_TLS_LDM64 46
++
++#define R_390_TLS_IE32 47
++
++#define R_390_TLS_IE64 48
++
++#define R_390_TLS_IEENT 49
++
++#define R_390_TLS_LE32 50
++
++#define R_390_TLS_LE64 51
++
++#define R_390_TLS_LDO32 52
++
++#define R_390_TLS_LDO64 53
++
++#define R_390_TLS_DTPMOD 54
++#define R_390_TLS_DTPOFF 55
++#define R_390_TLS_TPOFF 56
++
++#define R_390_20 57
++#define R_390_GOT20 58
++#define R_390_GOTPLT20 59
++#define R_390_TLS_GOTIE20 60
++
++
++#define R_390_NUM 61
++
++
++
++#define R_CRIS_NONE 0
++#define R_CRIS_8 1
++#define R_CRIS_16 2
++#define R_CRIS_32 3
++#define R_CRIS_8_PCREL 4
++#define R_CRIS_16_PCREL 5
++#define R_CRIS_32_PCREL 6
++#define R_CRIS_GNU_VTINHERIT 7
++#define R_CRIS_GNU_VTENTRY 8
++#define R_CRIS_COPY 9
++#define R_CRIS_GLOB_DAT 10
++#define R_CRIS_JUMP_SLOT 11
++#define R_CRIS_RELATIVE 12
++#define R_CRIS_16_GOT 13
++#define R_CRIS_32_GOT 14
++#define R_CRIS_16_GOTPLT 15
++#define R_CRIS_32_GOTPLT 16
++#define R_CRIS_32_GOTREL 17
++#define R_CRIS_32_PLT_GOTREL 18
++#define R_CRIS_32_PLT_PCREL 19
++
++#define R_CRIS_NUM 20
++
++
++
++#define R_X86_64_NONE 0
++#define R_X86_64_64 1
++#define R_X86_64_PC32 2
++#define R_X86_64_GOT32 3
++#define R_X86_64_PLT32 4
++#define R_X86_64_COPY 5
++#define R_X86_64_GLOB_DAT 6
++#define R_X86_64_JUMP_SLOT 7
++#define R_X86_64_RELATIVE 8
++#define R_X86_64_GOTPCREL 9
++
++#define R_X86_64_32 10
++#define R_X86_64_32S 11
++#define R_X86_64_16 12
++#define R_X86_64_PC16 13
++#define R_X86_64_8 14
++#define R_X86_64_PC8 15
++#define R_X86_64_DTPMOD64 16
++#define R_X86_64_DTPOFF64 17
++#define R_X86_64_TPOFF64 18
++#define R_X86_64_TLSGD 19
++
++#define R_X86_64_TLSLD 20
++
++#define R_X86_64_DTPOFF32 21
++#define R_X86_64_GOTTPOFF 22
++
++#define R_X86_64_TPOFF32 23
++#define R_X86_64_PC64 24
++#define R_X86_64_GOTOFF64 25
++#define R_X86_64_GOTPC32 26
++#define R_X86_64_GOT64 27
++#define R_X86_64_GOTPCREL64 28
++#define R_X86_64_GOTPC64 29
++#define R_X86_64_GOTPLT64 30
++#define R_X86_64_PLTOFF64 31
++#define R_X86_64_SIZE32 32
++#define R_X86_64_SIZE64 33
++
++#define R_X86_64_GOTPC32_TLSDESC 34
++#define R_X86_64_TLSDESC_CALL 35
++
++#define R_X86_64_TLSDESC 36
++#define R_X86_64_IRELATIVE 37
++#define R_X86_64_RELATIVE64 38
++#define R_X86_64_NUM 39
++
++
++
++#define R_MN10300_NONE 0
++#define R_MN10300_32 1
++#define R_MN10300_16 2
++#define R_MN10300_8 3
++#define R_MN10300_PCREL32 4
++#define R_MN10300_PCREL16 5
++#define R_MN10300_PCREL8 6
++#define R_MN10300_GNU_VTINHERIT 7
++#define R_MN10300_GNU_VTENTRY 8
++#define R_MN10300_24 9
++#define R_MN10300_GOTPC32 10
++#define R_MN10300_GOTPC16 11
++#define R_MN10300_GOTOFF32 12
++#define R_MN10300_GOTOFF24 13
++#define R_MN10300_GOTOFF16 14
++#define R_MN10300_PLT32 15
++#define R_MN10300_PLT16 16
++#define R_MN10300_GOT32 17
++#define R_MN10300_GOT24 18
++#define R_MN10300_GOT16 19
++#define R_MN10300_COPY 20
++#define R_MN10300_GLOB_DAT 21
++#define R_MN10300_JMP_SLOT 22
++#define R_MN10300_RELATIVE 23
++
++#define R_MN10300_NUM 24
++
++
++
++#define R_M32R_NONE 0
++#define R_M32R_16 1
++#define R_M32R_32 2
++#define R_M32R_24 3
++#define R_M32R_10_PCREL 4
++#define R_M32R_18_PCREL 5
++#define R_M32R_26_PCREL 6
++#define R_M32R_HI16_ULO 7
++#define R_M32R_HI16_SLO 8
++#define R_M32R_LO16 9
++#define R_M32R_SDA16 10
++#define R_M32R_GNU_VTINHERIT 11
++#define R_M32R_GNU_VTENTRY 12
++
++#define R_M32R_16_RELA 33
++#define R_M32R_32_RELA 34
++#define R_M32R_24_RELA 35
++#define R_M32R_10_PCREL_RELA 36
++#define R_M32R_18_PCREL_RELA 37
++#define R_M32R_26_PCREL_RELA 38
++#define R_M32R_HI16_ULO_RELA 39
++#define R_M32R_HI16_SLO_RELA 40
++#define R_M32R_LO16_RELA 41
++#define R_M32R_SDA16_RELA 42
++#define R_M32R_RELA_GNU_VTINHERIT 43
++#define R_M32R_RELA_GNU_VTENTRY 44
++#define R_M32R_REL32 45
++
++#define R_M32R_GOT24 48
++#define R_M32R_26_PLTREL 49
++#define R_M32R_COPY 50
++#define R_M32R_GLOB_DAT 51
++#define R_M32R_JMP_SLOT 52
++#define R_M32R_RELATIVE 53
++#define R_M32R_GOTOFF 54
++#define R_M32R_GOTPC24 55
++#define R_M32R_GOT16_HI_ULO 56
++
++#define R_M32R_GOT16_HI_SLO 57
++
++#define R_M32R_GOT16_LO 58
++#define R_M32R_GOTPC_HI_ULO 59
++
++#define R_M32R_GOTPC_HI_SLO 60
++
++#define R_M32R_GOTPC_LO 61
++
++#define R_M32R_GOTOFF_HI_ULO 62
++
++#define R_M32R_GOTOFF_HI_SLO 63
++
++#define R_M32R_GOTOFF_LO 64
++#define R_M32R_NUM 256
++
++#define R_MICROBLAZE_NONE 0
++#define R_MICROBLAZE_32 1
++#define R_MICROBLAZE_32_PCREL 2
++#define R_MICROBLAZE_64_PCREL 3
++#define R_MICROBLAZE_32_PCREL_LO 4
++#define R_MICROBLAZE_64 5
++#define R_MICROBLAZE_32_LO 6
++#define R_MICROBLAZE_SRO32 7
++#define R_MICROBLAZE_SRW32 8
++#define R_MICROBLAZE_64_NONE 9
++#define R_MICROBLAZE_32_SYM_OP_SYM 10
++#define R_MICROBLAZE_GNU_VTINHERIT 11
++#define R_MICROBLAZE_GNU_VTENTRY 12
++#define R_MICROBLAZE_GOTPC_64 13
++#define R_MICROBLAZE_GOT_64 14
++#define R_MICROBLAZE_PLT_64 15
++#define R_MICROBLAZE_REL 16
++#define R_MICROBLAZE_JUMP_SLOT 17
++#define R_MICROBLAZE_GLOB_DAT 18
++#define R_MICROBLAZE_GOTOFF_64 19
++#define R_MICROBLAZE_GOTOFF_32 20
++#define R_MICROBLAZE_COPY 21
++#define R_MICROBLAZE_TLS 22
++#define R_MICROBLAZE_TLSGD 23
++#define R_MICROBLAZE_TLSLD 24
++#define R_MICROBLAZE_TLSDTPMOD32 25
++#define R_MICROBLAZE_TLSDTPREL32 26
++#define R_MICROBLAZE_TLSDTPREL64 27
++#define R_MICROBLAZE_TLSGOTTPREL32 28
++#define R_MICROBLAZE_TLSTPREL32 29
++
++#ifdef __cplusplus
++}
++#endif
++
++
++#endif
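Note: the R_*, SHT_* and PT_* constants above appear to mirror the standard processor-specific ELF values (as found in glibc's elf.h) so the host-side tools can build without the system header. As a rough, hypothetical sketch of how relocation-type constants like these are normally consumed, the snippet below walks a RELA table and names a few x86_64 entries; the Elf64_Rela layout and the ELF64_R_TYPE macro are standard ELF definitions restated locally, and only the numeric values are taken from the defines above.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Values as in the x86_64 relocation defines above. */
#define R_X86_64_64        1
#define R_X86_64_PC32      2
#define R_X86_64_RELATIVE  8
#define R_X86_64_IRELATIVE 37

/* Standard ELF64 RELA entry layout (normally provided by <elf.h>). */
typedef struct {
	uint64_t r_offset;  /* location to patch */
	uint64_t r_info;    /* symbol index and relocation type */
	int64_t  r_addend;  /* constant addend */
} Elf64_Rela;

#define ELF64_R_TYPE(info) ((uint32_t)((info) & 0xffffffff))

static const char *reloc_name(uint32_t type)
{
	switch (type) {
	case R_X86_64_64:        return "R_X86_64_64";
	case R_X86_64_PC32:      return "R_X86_64_PC32";
	case R_X86_64_RELATIVE:  return "R_X86_64_RELATIVE";
	case R_X86_64_IRELATIVE: return "R_X86_64_IRELATIVE";
	default:                 return "unhandled";
	}
}

static void dump_relocs(const Elf64_Rela *rela, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		printf("0x%llx %s\n",
		       (unsigned long long)rela[i].r_offset,
		       reloc_name(ELF64_R_TYPE(rela[i].r_info)));
}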
diff --git a/target/linux/patches/3.14.43/sgidefs.patch b/target/linux/patches/3.14.43/sgidefs.patch
new file mode 100644
index 000000000..f00a284d9
--- /dev/null
+++ b/target/linux/patches/3.14.43/sgidefs.patch
@@ -0,0 +1,18 @@
+diff -Nur linux-3.11.5.orig/arch/mips/include/uapi/asm/sgidefs.h linux-3.11.5/arch/mips/include/uapi/asm/sgidefs.h
+--- linux-3.11.5.orig/arch/mips/include/uapi/asm/sgidefs.h 2013-10-14 03:14:45.000000000 +0200
++++ linux-3.11.5/arch/mips/include/uapi/asm/sgidefs.h 2013-11-08 22:01:28.000000000 +0100
+@@ -11,14 +11,6 @@
+ #define __ASM_SGIDEFS_H
+
+ /*
+- * Using a Linux compiler for building Linux seems logic but not to
+- * everybody.
+- */
+-#ifndef __linux__
+-#error Use a Linux compiler or give up.
+-#endif
+-
+-/*
+ * Definitions for the ISA levels
+ *
+ * With the introduction of MIPS32 / MIPS64 instruction sets definitions
diff --git a/target/linux/patches/3.14.43/sortext.patch b/target/linux/patches/3.14.43/sortext.patch
new file mode 100644
index 000000000..8fd4e1d6b
--- /dev/null
+++ b/target/linux/patches/3.14.43/sortext.patch
@@ -0,0 +1,33 @@
+diff -Nur linux-3.12.6.orig/arch/arm/Kconfig linux-3.12.6/arch/arm/Kconfig
+--- linux-3.12.6.orig/arch/arm/Kconfig 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/arch/arm/Kconfig 2013-12-28 19:29:33.000000000 +0100
+@@ -6,7 +6,6 @@
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_WANT_IPC_PARSE_VERSION
+- select BUILDTIME_EXTABLE_SORT if MMU
+ select CLONE_BACKWARDS
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
+diff -Nur linux-3.12.6.orig/arch/mips/Kconfig linux-3.12.6/arch/mips/Kconfig
+--- linux-3.12.6.orig/arch/mips/Kconfig 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/arch/mips/Kconfig 2013-12-28 19:30:06.000000000 +0100
+@@ -35,7 +35,6 @@
+ select HAVE_MEMBLOCK_NODE_MAP
+ select ARCH_DISCARD_MEMBLOCK
+ select GENERIC_SMP_IDLE_THREAD
+- select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CMOS_UPDATE
+ select HAVE_MOD_ARCH_SPECIFIC
+diff -Nur linux-3.12.6.orig/arch/x86/Kconfig linux-3.12.6/arch/x86/Kconfig
+--- linux-3.12.6.orig/arch/x86/Kconfig 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/arch/x86/Kconfig 2013-12-28 19:29:50.000000000 +0100
+@@ -100,7 +100,6 @@
+ select GENERIC_SMP_IDLE_THREAD
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select HAVE_ARCH_SECCOMP_FILTER
+- select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CMOS_UPDATE
+ select HAVE_ARCH_SOFT_DIRTY
+ select CLOCKSOURCE_WATCHDOG
diff --git a/target/linux/patches/3.14.43/startup.patch b/target/linux/patches/3.14.43/startup.patch
new file mode 100644
index 000000000..d396b75e4
--- /dev/null
+++ b/target/linux/patches/3.14.43/startup.patch
@@ -0,0 +1,37 @@
+diff -Nur linux-3.13.3.orig/init/main.c linux-3.13.3/init/main.c
+--- linux-3.13.3.orig/init/main.c 2014-02-13 23:00:14.000000000 +0100
++++ linux-3.13.3/init/main.c 2014-02-17 11:35:14.000000000 +0100
+@@ -916,6 +917,8 @@
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ pr_err("Warning: unable to open an initial console.\n");
+
++ printk(KERN_WARNING "Starting Linux (built with OpenADK).\n");
++
+ (void) sys_dup(0);
+ (void) sys_dup(0);
+ /*
+diff -Nur linux-3.13.6.orig/init/initramfs.c linux-3.13.6/init/initramfs.c
+--- linux-3.13.6.orig/init/initramfs.c 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/init/initramfs.c 2014-03-15 12:11:31.882731916 +0100
+@@ -622,6 +622,9 @@
+ */
+ load_default_modules();
+ }
++#ifdef CONFIG_DEVTMPFS_MOUNT
++ devtmpfs_mount("dev");
++#endif
+ return 0;
+ }
+ rootfs_initcall(populate_rootfs);
+diff -Nur linux-3.13.6.orig/init/main.c linux-3.13.6/init/main.c
+--- linux-3.13.6.orig/init/main.c 2014-03-07 07:07:02.000000000 +0100
++++ linux-3.13.6/init/main.c 2014-03-15 12:13:16.459024452 +0100
+@@ -924,7 +924,7 @@
+ */
+
+ if (!ramdisk_execute_command)
+- ramdisk_execute_command = "/init";
++ ramdisk_execute_command = "/sbin/init";
+
+ if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
diff --git a/target/linux/patches/3.14.43/wlan-cf.patch b/target/linux/patches/3.14.43/wlan-cf.patch
new file mode 100644
index 000000000..fc20759e2
--- /dev/null
+++ b/target/linux/patches/3.14.43/wlan-cf.patch
@@ -0,0 +1,11 @@
+diff -Nur linux-2.6.39.orig/drivers/net/wireless/hostap/hostap_cs.c linux-2.6.39/drivers/net/wireless/hostap/hostap_cs.c
+--- linux-2.6.39.orig/drivers/net/wireless/hostap/hostap_cs.c 2011-05-19 06:06:34.000000000 +0200
++++ linux-2.6.39/drivers/net/wireless/hostap/hostap_cs.c 2011-09-12 02:46:26.987984145 +0200
+@@ -623,6 +623,7 @@
+ static struct pcmcia_device_id hostap_cs_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
+ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
++ PCMCIA_DEVICE_MANF_CARD(0x0004, 0x2003),
+ PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
+ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
+ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
diff --git a/target/linux/patches/3.14.43/xargs.patch b/target/linux/patches/3.14.43/xargs.patch
new file mode 100644
index 000000000..2c7b3df59
--- /dev/null
+++ b/target/linux/patches/3.14.43/xargs.patch
@@ -0,0 +1,12 @@
+diff -Nur linux-3.12.6.orig/scripts/Makefile.modpost linux-3.12.6/scripts/Makefile.modpost
+--- linux-3.12.6.orig/scripts/Makefile.modpost 2013-12-20 16:51:33.000000000 +0100
++++ linux-3.12.6/scripts/Makefile.modpost 2014-01-25 14:55:33.000000000 +0100
+@@ -60,7 +60,7 @@
+ modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
+
+ # Step 1), find all modules listed in $(MODVERDIR)/
+-MODLISTCMD := find $(MODVERDIR) -name '*.mod' | xargs -r grep -h '\.ko$$' | sort -u
++MODLISTCMD := find $(MODVERDIR) -name '*.mod' | xargs grep -h '\.ko$$' | sort -u
+ __modules := $(shell $(MODLISTCMD))
+ modules := $(patsubst %.o,%.ko, $(wildcard $(__modules:.ko=.o)))
+
diff --git a/target/linux/patches/3.14.43/yaffs2.patch b/target/linux/patches/3.14.43/yaffs2.patch
new file mode 100644
index 000000000..f075aa658
--- /dev/null
+++ b/target/linux/patches/3.14.43/yaffs2.patch
@@ -0,0 +1,16547 @@
+diff -Nur linux-3.14.4.orig/fs/Kconfig linux-3.14.4/fs/Kconfig
+--- linux-3.14.4.orig/fs/Kconfig 2014-05-13 13:33:14.000000000 +0200
++++ linux-3.14.4/fs/Kconfig 2014-05-17 02:22:30.000000000 +0200
+@@ -189,6 +189,7 @@
+ source "fs/befs/Kconfig"
+ source "fs/bfs/Kconfig"
+ source "fs/efs/Kconfig"
++source "fs/yaffs2/Kconfig"
+ source "fs/jffs2/Kconfig"
+ # UBIFS File system configuration
+ source "fs/ubifs/Kconfig"
+diff -Nur linux-3.14.4.orig/fs/Makefile linux-3.14.4/fs/Makefile
+--- linux-3.14.4.orig/fs/Makefile 2014-05-13 13:33:14.000000000 +0200
++++ linux-3.14.4/fs/Makefile 2014-05-17 02:22:30.000000000 +0200
+@@ -125,3 +125,4 @@
+ obj-$(CONFIG_CEPH_FS) += ceph/
+ obj-$(CONFIG_PSTORE) += pstore/
+ obj-$(CONFIG_EFIVAR_FS) += efivarfs/
++obj-$(CONFIG_YAFFS_FS) += yaffs2/
+diff -Nur linux-3.14.4.orig/fs/yaffs2/Kconfig linux-3.14.4/fs/yaffs2/Kconfig
+--- linux-3.14.4.orig/fs/yaffs2/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/Kconfig 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,171 @@
++#
++# yaffs file system configurations
++#
++
++config YAFFS_FS
++ tristate "yaffs2 file system support"
++ default n
++ depends on MTD_BLOCK
++ select YAFFS_YAFFS1
++ select YAFFS_YAFFS2
++ help
++ yaffs2, or Yet Another Flash File System, is a file system
++ optimised for NAND Flash chips.
++
++ To compile the yaffs2 file system support as a module, choose M
++ here: the module will be called yaffs2.
++
++ If unsure, say N.
++
++ Further information on yaffs2 is available at
++ <http://www.aleph1.co.uk/yaffs/>.
++
++config YAFFS_YAFFS1
++ bool "512 byte / page devices"
++ depends on YAFFS_FS
++ default y
++ help
++ Enable yaffs1 support -- yaffs for 512 byte / page devices
++
++ Not needed for 2K-page devices.
++
++ If unsure, say Y.
++
++config YAFFS_9BYTE_TAGS
++ bool "Use older-style on-NAND data format with pageStatus byte"
++ depends on YAFFS_YAFFS1
++ default n
++ help
++
++ Older-style on-NAND data format has a "pageStatus" byte to record
++ chunk/page state. This byte is zero when the page is discarded.
++ Choose this option if you have existing on-NAND data using this
++ format that you need to continue to support. New data written
++ also uses the older-style format. Note: Use of this option
++ generally requires that MTD's oob layout be adjusted to use the
++ older-style format. See notes on tags formats and MTD versions
++ in yaffs_mtdif1.c.
++
++ If unsure, say N.
++
++config YAFFS_DOES_ECC
++ bool "Lets yaffs do its own ECC"
++ depends on YAFFS_FS && YAFFS_YAFFS1 && !YAFFS_9BYTE_TAGS
++ default n
++ help
++ This enables yaffs to use its own ECC functions instead of using
++ the ones from the generic MTD-NAND driver.
++
++ If unsure, say N.
++
++config YAFFS_ECC_WRONG_ORDER
++ bool "Use the same ecc byte order as Steven Hill's nand_ecc.c"
++ depends on YAFFS_FS && YAFFS_DOES_ECC && !YAFFS_9BYTE_TAGS
++ default n
++ help
++ This makes yaffs_ecc.c use the same ecc byte order as Steven
++ Hill's nand_ecc.c. If not set, then you get the same ecc byte
++ order as SmartMedia.
++
++ If unsure, say N.
++
++config YAFFS_YAFFS2
++ bool "2048 byte (or larger) / page devices"
++ depends on YAFFS_FS
++ default y
++ help
++ Enable yaffs2 support -- yaffs for >= 2K bytes per page devices
++
++ If unsure, say Y.
++
++config YAFFS_AUTO_YAFFS2
++ bool "Autoselect yaffs2 format"
++ depends on YAFFS_YAFFS2
++ default y
++ help
++ Without this, you need to explicitly use yaffs2 as the file
++ system type. With this, you can say "yaffs" and yaffs or yaffs2
++ will be used depending on the device page size (yaffs on
++ 512-byte page devices, yaffs2 on 2K page devices).
++
++ If unsure, say Y.
++
++config YAFFS_DISABLE_TAGS_ECC
++ bool "Disable yaffs from doing ECC on tags by default"
++ depends on YAFFS_FS && YAFFS_YAFFS2
++ default n
++ help
++ This defaults yaffs to using its own ECC calculations on tags instead of
++ just relying on the MTD.
++ This behavior can also be overridden with tags_ecc_on and
++ tags_ecc_off mount options.
++
++ If unsure, say N.
++
++config YAFFS_ALWAYS_CHECK_CHUNK_ERASED
++ bool "Force chunk erase check"
++ depends on YAFFS_FS
++ default n
++ help
++ Normally yaffs only checks chunks before writing until an erased
++ chunk is found. This helps to detect any partially written
++ chunks that might have happened due to power loss.
++
++ Enabling this forces on the test that chunks are erased in flash
++ before writing to them. This takes more time but is potentially
++ a bit more secure.
++
++ Suggest setting Y during development and ironing out driver
++ issues etc. Suggest setting to N if you want faster writing.
++
++ If unsure, say Y.
++
++config YAFFS_EMPTY_LOST_AND_FOUND
++ bool "Empty lost and found on boot"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is enabled then the contents of lost and found is
++ automatically dumped at mount.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BLOCK_REFRESHING
++ bool "Disable yaffs2 block refreshing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then block refreshing is disabled.
++ Block refreshing infrequently refreshes the oldest block in
++ a yaffs2 file system. This mechanism helps to refresh flash to
++ mitigate against data loss. This is particularly useful for MLC.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BACKGROUND
++ bool "Disable yaffs2 background processing"
++ depends on YAFFS_FS
++ default n
++ help
++ If this is set, then background processing is disabled.
++ Background processing makes many foreground activities faster.
++
++ If unsure, say N.
++
++config YAFFS_DISABLE_BAD_BLOCK_MARKING
++ bool "Disable yaffs2 bad block marking"
++ depends on YAFFS_FS
++ default n
++ help
++ Useful during early flash bring up to prevent problems causing
++ lots of bad block marking.
++
++ If unsure, say N.
++
++config YAFFS_XATTR
++ bool "Enable yaffs2 xattr support"
++ depends on YAFFS_FS
++ default y
++ help
++ If this is set then yaffs2 will provide xattr support.
++ If unsure, say Y.
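The tags_ecc_on / tags_ecc_off options mentioned in the YAFFS_DISABLE_TAGS_ECC help text above are passed as mount options at mount time. A minimal usage sketch follows; the device node and mount point are placeholder names, and only the filesystem type and option name come from the help text.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* /dev/mtdblock3 and /mnt/flash are hypothetical names. */
	if (mount("/dev/mtdblock3", "/mnt/flash", "yaffs2",
		  0, "tags_ecc_off") != 0) {
		perror("mount yaffs2");
		return 1;
	}
	return 0;
}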
+diff -Nur linux-3.14.4.orig/fs/yaffs2/Makefile linux-3.14.4/fs/yaffs2/Makefile
+--- linux-3.14.4.orig/fs/yaffs2/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/Makefile 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,18 @@
++#
++# Makefile for the linux YAFFS filesystem routines.
++#
++
++obj-$(CONFIG_YAFFS_FS) += yaffs.o
++
++yaffs-y := yaffs_ecc.o yaffs_vfs.o yaffs_guts.o yaffs_checkptrw.o
++yaffs-y += yaffs_packedtags1.o yaffs_packedtags2.o yaffs_nand.o
++yaffs-y += yaffs_tagscompat.o yaffs_tagsmarshall.o
++yaffs-y += yaffs_mtdif.o
++yaffs-y += yaffs_nameval.o yaffs_attribs.o
++yaffs-y += yaffs_allocator.o
++yaffs-y += yaffs_yaffs1.o
++yaffs-y += yaffs_yaffs2.o
++yaffs-y += yaffs_bitmap.o
++yaffs-y += yaffs_summary.o
++yaffs-y += yaffs_verify.o
++
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_allocator.c linux-3.14.4/fs/yaffs2/yaffs_allocator.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_allocator.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_allocator.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,357 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_allocator.h"
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yportenv.h"
++
++/*
++ * Each entry in yaffs_tnode_list and yaffs_obj_list holds blocks
++ * of approx 100 objects that are then allocated singly.
++ * This is basically a simplified slab allocator.
++ *
++ * We don't use the Linux slab allocator because slab does not allow
++ * us to dump all the objects in one hit when we do a umount and tear
++ * down all the tnodes and objects. slab requires that we first free
++ * the individual objects.
++ *
++ * Once yaffs has been mainlined I shall try to motivate for a change
++ * to slab to provide the extra features we need here.
++ */
++
++struct yaffs_tnode_list {
++ struct yaffs_tnode_list *next;
++ struct yaffs_tnode *tnodes;
++};
++
++struct yaffs_obj_list {
++ struct yaffs_obj_list *next;
++ struct yaffs_obj *objects;
++};
++
++struct yaffs_allocator {
++ int n_tnodes_created;
++ struct yaffs_tnode *free_tnodes;
++ int n_free_tnodes;
++ struct yaffs_tnode_list *alloc_tnode_list;
++
++ int n_obj_created;
++ struct list_head free_objs;
++ int n_free_objects;
++
++ struct yaffs_obj_list *allocated_obj_list;
++};
++
++static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator =
++ (struct yaffs_allocator *)dev->allocator;
++ struct yaffs_tnode_list *tmp;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ while (allocator->alloc_tnode_list) {
++ tmp = allocator->alloc_tnode_list->next;
++
++ kfree(allocator->alloc_tnode_list->tnodes);
++ kfree(allocator->alloc_tnode_list);
++ allocator->alloc_tnode_list = tmp;
++ }
++
++ allocator->free_tnodes = NULL;
++ allocator->n_free_tnodes = 0;
++ allocator->n_tnodes_created = 0;
++}
++
++static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ allocator->alloc_tnode_list = NULL;
++ allocator->free_tnodes = NULL;
++ allocator->n_free_tnodes = 0;
++ allocator->n_tnodes_created = 0;
++}
++
++static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
++{
++ struct yaffs_allocator *allocator =
++ (struct yaffs_allocator *)dev->allocator;
++ int i;
++ struct yaffs_tnode *new_tnodes;
++ u8 *mem;
++ struct yaffs_tnode *curr;
++ struct yaffs_tnode *next;
++ struct yaffs_tnode_list *tnl;
++
++ if (!allocator) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_tnodes < 1)
++ return YAFFS_OK;
++
++ /* make these things */
++ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
++ mem = (u8 *) new_tnodes;
++
++ if (!new_tnodes) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs: Could not allocate Tnodes");
++ return YAFFS_FAIL;
++ }
++
++ /* New hookup for wide tnodes */
++ for (i = 0; i < n_tnodes - 1; i++) {
++ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
++ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
++ curr->internal[0] = next;
++ }
++
++ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
++ curr->internal[0] = allocator->free_tnodes;
++ allocator->free_tnodes = (struct yaffs_tnode *)mem;
++
++ allocator->n_free_tnodes += n_tnodes;
++ allocator->n_tnodes_created += n_tnodes;
++
++ /* Now add this bunch of tnodes to a list for freeing up.
++ * NB If we can't add this to the management list it isn't fatal
++ * but it just means we can't free this bunch of tnodes later.
++ */
++ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
++ if (!tnl) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Could not add tnodes to management list");
++ return YAFFS_FAIL;
++ } else {
++ tnl->tnodes = new_tnodes;
++ tnl->next = allocator->alloc_tnode_list;
++ allocator->alloc_tnode_list = tnl;
++ }
++
++ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
++
++ return YAFFS_OK;
++}
++
++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator =
++ (struct yaffs_allocator *)dev->allocator;
++ struct yaffs_tnode *tn = NULL;
++
++ if (!allocator) {
++ BUG();
++ return NULL;
++ }
++
++ /* If there are none left make more */
++ if (!allocator->free_tnodes)
++ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
++
++ if (allocator->free_tnodes) {
++ tn = allocator->free_tnodes;
++ allocator->free_tnodes = allocator->free_tnodes->internal[0];
++ allocator->n_free_tnodes--;
++ }
++
++ return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ if (tn) {
++ tn->internal[0] = allocator->free_tnodes;
++ allocator->free_tnodes = tn;
++ allocator->n_free_tnodes++;
++ }
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++}
++
++/*--------------- yaffs_obj allocation ------------------------
++ *
++ * Free yaffs_objs are stored in a list using obj->siblings.
++ * The blocks of allocated objects are stored in a linked list.
++ */
++
++static void yaffs_init_raw_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ allocator->allocated_obj_list = NULL;
++ INIT_LIST_HEAD(&allocator->free_objs);
++ allocator->n_free_objects = 0;
++}
++
++static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++ struct yaffs_obj_list *tmp;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ while (allocator->allocated_obj_list) {
++ tmp = allocator->allocated_obj_list->next;
++ kfree(allocator->allocated_obj_list->objects);
++ kfree(allocator->allocated_obj_list);
++ allocator->allocated_obj_list = tmp;
++ }
++
++ INIT_LIST_HEAD(&allocator->free_objs);
++ allocator->n_free_objects = 0;
++ allocator->n_obj_created = 0;
++}
++
++static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
++{
++ struct yaffs_allocator *allocator = dev->allocator;
++ int i;
++ struct yaffs_obj *new_objs;
++ struct yaffs_obj_list *list;
++
++ if (!allocator) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ if (n_obj < 1)
++ return YAFFS_OK;
++
++ /* make these things */
++ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
++ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
++
++ if (!new_objs || !list) {
++ kfree(new_objs);
++ new_objs = NULL;
++ kfree(list);
++ list = NULL;
++ yaffs_trace(YAFFS_TRACE_ALLOCATE,
++ "Could not allocate more objects");
++ return YAFFS_FAIL;
++ }
++
++ /* Hook them into the free list */
++ for (i = 0; i < n_obj; i++)
++ list_add(&new_objs[i].siblings, &allocator->free_objs);
++
++ allocator->n_free_objects += n_obj;
++ allocator->n_obj_created += n_obj;
++
++ /* Now add this bunch of Objects to a list for freeing up. */
++
++ list->objects = new_objs;
++ list->next = allocator->allocated_obj_list;
++ allocator->allocated_obj_list = list;
++
++ return YAFFS_OK;
++}
++
++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj = NULL;
++ struct list_head *lh;
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return obj;
++ }
++
++ /* If there are none left make more */
++ if (list_empty(&allocator->free_objs))
++ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
++
++ if (!list_empty(&allocator->free_objs)) {
++ lh = allocator->free_objs.next;
++ obj = list_entry(lh, struct yaffs_obj, siblings);
++ list_del_init(lh);
++ allocator->n_free_objects--;
++ }
++
++ return obj;
++}
++
++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
++{
++
++ struct yaffs_allocator *allocator = dev->allocator;
++
++ if (!allocator) {
++ BUG();
++ return;
++ }
++
++ /* Link into the free list. */
++ list_add(&obj->siblings, &allocator->free_objs);
++ allocator->n_free_objects++;
++}
++
++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
++{
++
++ if (!dev->allocator) {
++ BUG();
++ return;
++ }
++
++ yaffs_deinit_raw_tnodes(dev);
++ yaffs_deinit_raw_objs(dev);
++ kfree(dev->allocator);
++ dev->allocator = NULL;
++}
++
++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_allocator *allocator;
++
++ if (dev->allocator) {
++ BUG();
++ return;
++ }
++
++ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
++ if (allocator) {
++ dev->allocator = allocator;
++ yaffs_init_raw_tnodes(dev);
++ yaffs_init_raw_objs(dev);
++ }
++}
++
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_allocator.h linux-3.14.4/fs/yaffs2/yaffs_allocator.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_allocator.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_allocator.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,30 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ALLOCATOR_H__
++#define __YAFFS_ALLOCATOR_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
++void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
++
++struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
++void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
++
++struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
++void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_attribs.c linux-3.14.4/fs/yaffs2/yaffs_attribs.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_attribs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_attribs.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,166 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_attribs.h"
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++static inline uid_t ia_uid_read(const struct iattr *iattr)
++{
++ return from_kuid(&init_user_ns, iattr->ia_uid);
++}
++
++static inline gid_t ia_gid_read(const struct iattr *iattr)
++{
++ return from_kgid(&init_user_ns, iattr->ia_gid);
++}
++
++static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
++{
++ iattr->ia_uid = make_kuid(&init_user_ns, uid);
++}
++
++static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
++{
++ iattr->ia_gid = make_kgid(&init_user_ns, gid);
++}
++#else
++static inline uid_t ia_uid_read(const struct iattr *iattr)
++{
++ return iattr->ia_uid;
++}
++
++static inline gid_t ia_gid_read(const struct iattr *iattr)
++{
++ return iattr->ia_gid;
++}
++
++static inline void ia_uid_write(struct iattr *iattr, uid_t uid)
++{
++ iattr->ia_uid = uid;
++}
++
++static inline void ia_gid_write(struct iattr *iattr, gid_t gid)
++{
++ iattr->ia_gid = gid;
++}
++#endif
++
++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
++{
++ obj->yst_uid = oh->yst_uid;
++ obj->yst_gid = oh->yst_gid;
++ obj->yst_atime = oh->yst_atime;
++ obj->yst_mtime = oh->yst_mtime;
++ obj->yst_ctime = oh->yst_ctime;
++ obj->yst_rdev = oh->yst_rdev;
++}
++
++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
++{
++ oh->yst_uid = obj->yst_uid;
++ oh->yst_gid = obj->yst_gid;
++ oh->yst_atime = obj->yst_atime;
++ oh->yst_mtime = obj->yst_mtime;
++ oh->yst_ctime = obj->yst_ctime;
++ oh->yst_rdev = obj->yst_rdev;
++
++}
++
++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
++{
++ obj->yst_mtime = Y_CURRENT_TIME;
++ if (do_a)
++ obj->yst_atime = obj->yst_mtime;
++ if (do_c)
++ obj->yst_ctime = obj->yst_mtime;
++}
++
++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
++{
++ yaffs_load_current_time(obj, 1, 1);
++ obj->yst_rdev = rdev;
++ obj->yst_uid = uid;
++ obj->yst_gid = gid;
++}
++
++static loff_t yaffs_get_file_size(struct yaffs_obj *obj)
++{
++ YCHAR *alias = NULL;
++ obj = yaffs_get_equivalent_obj(obj);
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return obj->variant.file_variant.file_size;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = obj->variant.symlink_variant.alias;
++ if (!alias)
++ return 0;
++ return strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
++ default:
++ return 0;
++ }
++}
++
++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
++{
++ unsigned int valid = attr->ia_valid;
++
++ if (valid & ATTR_MODE)
++ obj->yst_mode = attr->ia_mode;
++ if (valid & ATTR_UID)
++ obj->yst_uid = ia_uid_read(attr);
++ if (valid & ATTR_GID)
++ obj->yst_gid = ia_gid_read(attr);
++
++ if (valid & ATTR_ATIME)
++ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
++ if (valid & ATTR_CTIME)
++ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
++ if (valid & ATTR_MTIME)
++ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
++
++ if (valid & ATTR_SIZE)
++ yaffs_resize_file(obj, attr->ia_size);
++
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++ return YAFFS_OK;
++
++}
++
++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
++{
++ unsigned int valid = 0;
++
++ attr->ia_mode = obj->yst_mode;
++ valid |= ATTR_MODE;
++ ia_uid_write(attr, obj->yst_uid);
++ valid |= ATTR_UID;
++ ia_gid_write(attr, obj->yst_gid);
++ valid |= ATTR_GID;
++
++ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
++ valid |= ATTR_ATIME;
++ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
++ valid |= ATTR_CTIME;
++ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
++ valid |= ATTR_MTIME;
++
++ attr->ia_size = yaffs_get_file_size(obj);
++ valid |= ATTR_SIZE;
++
++ attr->ia_valid = valid;
++
++ return YAFFS_OK;
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_attribs.h linux-3.14.4/fs/yaffs2/yaffs_attribs.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_attribs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_attribs.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_ATTRIBS_H__
++#define __YAFFS_ATTRIBS_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
++void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
++void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
++void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
++int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
++int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_bitmap.c linux-3.14.4/fs/yaffs2/yaffs_bitmap.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_bitmap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_bitmap.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,97 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_bitmap.h"
++#include "yaffs_trace.h"
++/*
++ * Chunk bitmap manipulations
++ */
++
++static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "BlockBits block %d is not valid",
++ blk);
++ BUG();
++ }
++ return dev->chunk_bits +
++ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
++}
++
++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
++ chunk < 0 || chunk >= dev->param.chunks_per_block) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Chunk Id (%d:%d) invalid",
++ blk, chunk);
++ BUG();
++ }
++}
++
++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ memset(blk_bits, 0, dev->chunk_bit_stride);
++}
++
++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
++}
++
++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++ blk_bits[chunk / 8] |= (1 << (chunk & 7));
++}
++
++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++
++ yaffs_verify_chunk_bit_id(dev, blk, chunk);
++ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
++}
++
++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++ int i;
++
++ for (i = 0; i < dev->chunk_bit_stride; i++) {
++ if (*blk_bits)
++ return 1;
++ blk_bits++;
++ }
++ return 0;
++}
++
++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
++{
++ u8 *blk_bits = yaffs_block_bits(dev, blk);
++ int i;
++ int n = 0;
++
++ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
++ n += hweight8(*blk_bits);
++
++ return n;
++}
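++
++/*
++ * Usage sketch (illustrative only; nothing in this file calls it): record
++ * that chunk "chunk" of internal block "blk" is in use, test it, then
++ * release it again.  Both values are internal block/chunk numbers, i.e.
++ * already adjusted for any block offset.
++ *
++ *	yaffs_set_chunk_bit(dev, blk, chunk);
++ *	in_use = yaffs_check_chunk_bit(dev, blk, chunk);	// 1 here
++ *	yaffs_clear_chunk_bit(dev, blk, chunk);
++ *
++ * yaffs_count_chunk_bits() and yaffs_still_some_chunks() summarise the same
++ * per-block bitmap without the caller having to walk it by hand.
++ */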
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_bitmap.h linux-3.14.4/fs/yaffs2/yaffs_bitmap.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_bitmap.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_bitmap.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * Chunk bitmap manipulations
++ */
++
++#ifndef __YAFFS_BITMAP_H__
++#define __YAFFS_BITMAP_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
++void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
++void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
++int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
++int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_checkptrw.c linux-3.14.4/fs/yaffs2/yaffs_checkptrw.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_checkptrw.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_checkptrw.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,474 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_checkptrw.h"
++#include "yaffs_getblockinfo.h"
++
++struct yaffs_checkpt_chunk_hdr {
++ int version;
++ int seq;
++ u32 sum;
++ u32 xor;
++};
++
++
++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
++{
++ return chunk - dev->chunk_offset;
++}
++
++static int apply_block_offset(struct yaffs_dev *dev, int block)
++{
++ return block - dev->block_offset;
++}
++
++static void yaffs2_checkpt_init_chunk_hdr(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_chunk_hdr hdr;
++
++ hdr.version = YAFFS_CHECKPOINT_VERSION;
++ hdr.seq = dev->checkpt_page_seq;
++ hdr.sum = dev->checkpt_sum;
++ hdr.xor = dev->checkpt_xor;
++
++ dev->checkpt_byte_offs = sizeof(hdr);
++
++ memcpy(dev->checkpt_buffer, &hdr, sizeof(hdr));
++}
++
++static int yaffs2_checkpt_check_chunk_hdr(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_chunk_hdr hdr;
++
++ memcpy(&hdr, dev->checkpt_buffer, sizeof(hdr));
++
++ dev->checkpt_byte_offs = sizeof(hdr);
++
++ return hdr.version == YAFFS_CHECKPOINT_VERSION &&
++ hdr.seq == dev->checkpt_page_seq &&
++ hdr.sum == dev->checkpt_sum &&
++ hdr.xor == dev->checkpt_xor;
++}
++
++static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
++{
++ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checkpt blocks_avail = %d", blocks_avail);
++
++ return (blocks_avail <= 0) ? 0 : 1;
++}
++
++static int yaffs_checkpt_erase(struct yaffs_dev *dev)
++{
++ int i;
++
++ if (!dev->drv.drv_erase_fn)
++ return 0;
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checking blocks %d to %d",
++ dev->internal_start_block, dev->internal_end_block);
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
++ int offset_i = apply_block_offset(dev, i);
++ int result;
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "erasing checkpt block %d", i);
++
++ dev->n_erasures++;
++
++ result = dev->drv.drv_erase_fn(dev, offset_i);
++ if (result) {
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ dev->n_free_chunks +=
++ dev->param.chunks_per_block;
++ } else {
++ dev->drv.drv_mark_bad_fn(dev, offset_i);
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++ }
++ }
++ }
++
++ dev->blocks_in_checkpt = 0;
++
++ return 1;
++}
++
++static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
++{
++ int i;
++ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
++ dev->n_erased_blocks, dev->param.n_reserved_blocks,
++ blocks_avail, dev->checkpt_next_block);
++
++ if (dev->checkpt_next_block >= 0 &&
++ dev->checkpt_next_block <= dev->internal_end_block &&
++ blocks_avail > 0) {
++
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
++ i++) {
++ struct yaffs_block_info *bi;
++
++ bi = yaffs_get_block_info(dev, i);
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ dev->checkpt_next_block = i + 1;
++ dev->checkpt_cur_block = i;
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "allocating checkpt block %d", i);
++ return;
++ }
++ }
++ }
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
++
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
++}
++
++static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
++{
++ int i;
++ struct yaffs_ext_tags tags;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "find next checkpt block: start: blocks %d next %d",
++ dev->blocks_in_checkpt, dev->checkpt_next_block);
++
++ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
++ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
++ i++) {
++ int chunk = i * dev->param.chunks_per_block;
++ enum yaffs_block_state state;
++ u32 seq;
++
++ dev->tagger.read_chunk_tags_fn(dev,
++ apply_chunk_offset(dev, chunk),
++ NULL, &tags);
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "find next checkpt block: search: block %d oid %d seq %d eccr %d",
++ i, tags.obj_id, tags.seq_number,
++ tags.ecc_result);
++
++ if (tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ continue;
++
++ dev->tagger.query_block_fn(dev,
++ apply_block_offset(dev, i),
++ &state, &seq);
++ if (state == YAFFS_BLOCK_STATE_DEAD)
++ continue;
++
++ /* Right kind of block */
++ dev->checkpt_next_block = tags.obj_id;
++ dev->checkpt_cur_block = i;
++ dev->checkpt_block_list[dev->blocks_in_checkpt] = i;
++ dev->blocks_in_checkpt++;
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "found checkpt block %d", i);
++ return;
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
++
++ dev->checkpt_next_block = -1;
++ dev->checkpt_cur_block = -1;
++}
++
++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
++{
++ int i;
++
++ dev->checkpt_open_write = writing;
++
++ /* Got the functions we need? */
++ if (!dev->tagger.write_chunk_tags_fn ||
++ !dev->tagger.read_chunk_tags_fn ||
++ !dev->drv.drv_erase_fn ||
++ !dev->drv.drv_mark_bad_fn)
++ return 0;
++
++ if (writing && !yaffs2_checkpt_space_ok(dev))
++ return 0;
++
++ if (!dev->checkpt_buffer)
++ dev->checkpt_buffer =
++ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++ if (!dev->checkpt_buffer)
++ return 0;
++
++ dev->checkpt_page_seq = 0;
++ dev->checkpt_byte_count = 0;
++ dev->checkpt_sum = 0;
++ dev->checkpt_xor = 0;
++ dev->checkpt_cur_block = -1;
++ dev->checkpt_cur_chunk = -1;
++ dev->checkpt_next_block = dev->internal_start_block;
++
++ if (writing) {
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++ yaffs2_checkpt_init_chunk_hdr(dev);
++ return yaffs_checkpt_erase(dev);
++ }
++
++ /* Opening for a read */
++ /* Set to a value that will kick off a read */
++ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
++ /* A checkpoint block list of 1 checkpoint block per 16 blocks is
++ * (hopefully) going to be way more than we need */
++ dev->blocks_in_checkpt = 0;
++ dev->checkpt_max_blocks =
++ (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
++ dev->checkpt_block_list =
++ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
++
++ if (!dev->checkpt_block_list)
++ return 0;
++
++ for (i = 0; i < dev->checkpt_max_blocks; i++)
++ dev->checkpt_block_list[i] = -1;
++
++ return 1;
++}
++
++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum)
++{
++ u32 composite_sum;
++
++ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
++ *sum = composite_sum;
++ return 1;
++}
++
++static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
++{
++ int chunk;
++ int offset_chunk;
++ struct yaffs_ext_tags tags;
++
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_erased_block(dev);
++ dev->checkpt_cur_chunk = 0;
++ }
++
++ if (dev->checkpt_cur_block < 0)
++ return 0;
++
++ tags.is_deleted = 0;
++ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
++ tags.chunk_id = dev->checkpt_page_seq + 1;
++ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
++ tags.n_bytes = dev->data_bytes_per_chunk;
++ if (dev->checkpt_cur_chunk == 0) {
++ /* First chunk we write for the block? Set block state to
++ checkpoint */
++ struct yaffs_block_info *bi =
++ yaffs_get_block_info(dev, dev->checkpt_cur_block);
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ dev->blocks_in_checkpt++;
++ }
++
++ chunk =
++ dev->checkpt_cur_block * dev->param.chunks_per_block +
++ dev->checkpt_cur_chunk;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checkpoint wite buffer nand %d(%d:%d) objid %d chId %d",
++ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
++ tags.obj_id, tags.chunk_id);
++
++ offset_chunk = apply_chunk_offset(dev, chunk);
++
++ dev->n_page_writes++;
++
++ dev->tagger.write_chunk_tags_fn(dev, offset_chunk,
++ dev->checkpt_buffer, &tags);
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
++ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
++ dev->checkpt_cur_chunk = 0;
++ dev->checkpt_cur_block = -1;
++ }
++ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
++
++ yaffs2_checkpt_init_chunk_hdr(dev);
++
++
++ return 1;
++}
++
++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
++{
++ int i = 0;
++ int ok = 1;
++ u8 *data_bytes = (u8 *) data;
++
++ if (!dev->checkpt_buffer)
++ return 0;
++
++ if (!dev->checkpt_open_write)
++ return -1;
++
++ while (i < n_bytes && ok) {
++ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
++ dev->checkpt_sum += *data_bytes;
++ dev->checkpt_xor ^= *data_bytes;
++
++ dev->checkpt_byte_offs++;
++ i++;
++ data_bytes++;
++ dev->checkpt_byte_count++;
++
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
++ ok = yaffs2_checkpt_flush_buffer(dev);
++ }
++
++ return i;
++}
++
++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
++{
++ int i = 0;
++ int ok = 1;
++ struct yaffs_ext_tags tags;
++ int chunk;
++ int offset_chunk;
++ u8 *data_bytes = (u8 *) data;
++
++ if (!dev->checkpt_buffer)
++ return 0;
++
++ if (dev->checkpt_open_write)
++ return -1;
++
++ while (i < n_bytes && ok) {
++
++ if (dev->checkpt_byte_offs < 0 ||
++ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
++
++ if (dev->checkpt_cur_block < 0) {
++ yaffs2_checkpt_find_block(dev);
++ dev->checkpt_cur_chunk = 0;
++ }
++
++ if (dev->checkpt_cur_block < 0) {
++ ok = 0;
++ break;
++ }
++
++ chunk = dev->checkpt_cur_block *
++ dev->param.chunks_per_block +
++ dev->checkpt_cur_chunk;
++
++ offset_chunk = apply_chunk_offset(dev, chunk);
++ dev->n_page_reads++;
++
++ /* read in the next chunk */
++ dev->tagger.read_chunk_tags_fn(dev,
++ offset_chunk,
++ dev->checkpt_buffer,
++ &tags);
++
++ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
++ ok = 0;
++ break;
++ }
++ if (!yaffs2_checkpt_check_chunk_hdr(dev)) {
++ ok = 0;
++ break;
++ }
++
++ dev->checkpt_page_seq++;
++ dev->checkpt_cur_chunk++;
++
++ if (dev->checkpt_cur_chunk >=
++ dev->param.chunks_per_block)
++ dev->checkpt_cur_block = -1;
++
++ }
++
++ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
++ dev->checkpt_sum += *data_bytes;
++ dev->checkpt_xor ^= *data_bytes;
++ dev->checkpt_byte_offs++;
++ i++;
++ data_bytes++;
++ dev->checkpt_byte_count++;
++ }
++
++ return i;
++}
++
++int yaffs_checkpt_close(struct yaffs_dev *dev)
++{
++ int i;
++
++ if (dev->checkpt_open_write) {
++ if (dev->checkpt_byte_offs !=
++ sizeof(struct yaffs_checkpt_chunk_hdr))
++ yaffs2_checkpt_flush_buffer(dev);
++ } else if (dev->checkpt_block_list) {
++ for (i = 0;
++ i < dev->blocks_in_checkpt &&
++ dev->checkpt_block_list[i] >= 0; i++) {
++ int blk = dev->checkpt_block_list[i];
++ struct yaffs_block_info *bi = NULL;
++
++ if (dev->internal_start_block <= blk &&
++ blk <= dev->internal_end_block)
++ bi = yaffs_get_block_info(dev, blk);
++ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ }
++ kfree(dev->checkpt_block_list);
++ dev->checkpt_block_list = NULL;
++ }
++
++ dev->n_free_chunks -=
++ dev->blocks_in_checkpt * dev->param.chunks_per_block;
++ dev->n_erased_blocks -= dev->blocks_in_checkpt;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
++ dev->checkpt_byte_count);
++
++ if (dev->checkpt_buffer) {
++ /* free the buffer */
++ kfree(dev->checkpt_buffer);
++ dev->checkpt_buffer = NULL;
++ return 1;
++ } else {
++ return 0;
++ }
++}
++
++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
++{
++ /* Erase the checkpoint data */
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "checkpoint invalidate of %d blocks",
++ dev->blocks_in_checkpt);
++
++ return yaffs_checkpt_erase(dev);
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_checkptrw.h linux-3.14.4/fs/yaffs2/yaffs_checkptrw.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_checkptrw.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_checkptrw.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_CHECKPTRW_H__
++#define __YAFFS_CHECKPTRW_H__
++
++#include "yaffs_guts.h"
++
++int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
++
++int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
++
++int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
++
++int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 *sum);
++
++int yaffs_checkpt_close(struct yaffs_dev *dev);
++
++int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
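++
++/*
++ * Minimal sketch of the calling sequence (error handling elided; "my_data"
++ * is just a caller-side structure used for illustration):
++ *
++ *	if (yaffs2_checkpt_open(dev, 1)) {		// open for writing
++ *		yaffs2_checkpt_wr(dev, &my_data, sizeof(my_data));
++ *		yaffs_checkpt_close(dev);
++ *	}
++ *
++ * Reading back is symmetrical: open with writing == 0 and pull the same
++ * bytes out with yaffs2_checkpt_rd().  yaffs2_checkpt_invalidate_stream()
++ * erases the stored checkpoint once it is no longer valid.
++ */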
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_ecc.c linux-3.14.4/fs/yaffs2/yaffs_ecc.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_ecc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_ecc.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,281 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
++ * such ECC blocks are used on a 512-byte NAND page.
++ *
++ */
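++
++/*
++ * Resulting layout (see yaffs_ecc_calc() below): ecc[2] carries the six
++ * column-parity bits in its top six bits, with the two unused low bits
++ * forced to 1; ecc[1] and ecc[0] carry the eight line-parity bits
++ * interleaved with the eight line-parity-prime bits (upper four pairs in
++ * ecc[1], lower four in ecc[0]), all stored inverted.
++ * 6 + 8 + 8 = 22 bits of parity in total.
++ */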
++
++#include "yportenv.h"
++
++#include "yaffs_ecc.h"
++
++/* Table generated by gen-ecc.c
++ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
++ * for each byte of data. These are instead provided in a table in bits7..2.
++ * Bit 0 of each entry indicates whether the entry has an odd or even parity,
++ * and therefore this bytes influence on the line parity.
++ */
++
++static const unsigned char column_parity_table[] = {
++ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
++ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
++ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
++ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
++ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
++ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
++ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
++ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
++ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
++ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
++ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
++ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
++ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
++ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
++ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
++ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
++};
++
++
++/* Calculate the ECC for a 256-byte block of data */
++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
++{
++ unsigned int i;
++ unsigned char col_parity = 0;
++ unsigned char line_parity = 0;
++ unsigned char line_parity_prime = 0;
++ unsigned char t;
++ unsigned char b;
++
++ for (i = 0; i < 256; i++) {
++ b = column_parity_table[*data++];
++ col_parity ^= b;
++
++ if (b & 0x01) { /* odd number of bits in the byte */
++ line_parity ^= i;
++ line_parity_prime ^= ~i;
++ }
++ }
++
++ ecc[2] = (~col_parity) | 0x03;
++
++ t = 0;
++ if (line_parity & 0x80)
++ t |= 0x80;
++ if (line_parity_prime & 0x80)
++ t |= 0x40;
++ if (line_parity & 0x40)
++ t |= 0x20;
++ if (line_parity_prime & 0x40)
++ t |= 0x10;
++ if (line_parity & 0x20)
++ t |= 0x08;
++ if (line_parity_prime & 0x20)
++ t |= 0x04;
++ if (line_parity & 0x10)
++ t |= 0x02;
++ if (line_parity_prime & 0x10)
++ t |= 0x01;
++ ecc[1] = ~t;
++
++ t = 0;
++ if (line_parity & 0x08)
++ t |= 0x80;
++ if (line_parity_prime & 0x08)
++ t |= 0x40;
++ if (line_parity & 0x04)
++ t |= 0x20;
++ if (line_parity_prime & 0x04)
++ t |= 0x10;
++ if (line_parity & 0x02)
++ t |= 0x08;
++ if (line_parity_prime & 0x02)
++ t |= 0x04;
++ if (line_parity & 0x01)
++ t |= 0x02;
++ if (line_parity_prime & 0x01)
++ t |= 0x01;
++ ecc[0] = ~t;
++
++}
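++
++/*
++ * Sanity check that is easy to do by hand: for a 256-byte buffer that is
++ * all 0x00 or all 0xff, column_parity_table[] yields 0x00 for every byte
++ * and no byte has odd parity, so col_parity, line_parity and
++ * line_parity_prime all stay 0 and the ECC comes out as 0xff 0xff 0xff.
++ * This is also why a freshly erased page (data and ECC bytes all 0xff)
++ * still passes the ECC check.
++ */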
++
++/* Correct the ECC on a 256 byte block of data */
++
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
++ const unsigned char *test_ecc)
++{
++ unsigned char d0, d1, d2; /* deltas */
++
++ d0 = read_ecc[0] ^ test_ecc[0];
++ d1 = read_ecc[1] ^ test_ecc[1];
++ d2 = read_ecc[2] ^ test_ecc[2];
++
++ if ((d0 | d1 | d2) == 0)
++ return 0; /* no error */
++
++ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
++ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
++ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
++ /* Single bit (recoverable) error in data */
++
++ unsigned byte;
++ unsigned bit;
++
++ bit = byte = 0;
++
++ if (d1 & 0x80)
++ byte |= 0x80;
++ if (d1 & 0x20)
++ byte |= 0x40;
++ if (d1 & 0x08)
++ byte |= 0x20;
++ if (d1 & 0x02)
++ byte |= 0x10;
++ if (d0 & 0x80)
++ byte |= 0x08;
++ if (d0 & 0x20)
++ byte |= 0x04;
++ if (d0 & 0x08)
++ byte |= 0x02;
++ if (d0 & 0x02)
++ byte |= 0x01;
++
++ if (d2 & 0x80)
++ bit |= 0x04;
++ if (d2 & 0x20)
++ bit |= 0x02;
++ if (d2 & 0x08)
++ bit |= 0x01;
++
++ data[byte] ^= (1 << bit);
++
++ return 1; /* Corrected the error */
++ }
++
++ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
++ /* Recoverable error in ecc */
++
++ read_ecc[0] = test_ecc[0];
++ read_ecc[1] = test_ecc[1];
++ read_ecc[2] = test_ecc[2];
++
++ return 1; /* Corrected the error */
++ }
++
++ /* Unrecoverable error */
++
++ return -1;
++
++}
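++
++/*
++ * Why the (d ^ (d >> 1)) & 0x55 test above detects a single-bit data error:
++ * each parity bit is paired with a "prime" bit computed over the
++ * complementary half of the addresses, so flipping exactly one data bit
++ * flips exactly one bit of each pair.  In the XOR deltas that means every
++ * adjacent bit pair holds exactly one 1, which is what the 0x55 mask checks
++ * in d0 and d1; d2 uses 0x54 because its two lowest bits are the filler
++ * bits that are always written as 1 and therefore never differ.
++ */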
++
++/*
++ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
++ */
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *ecc_other)
++{
++ unsigned int i;
++ unsigned char col_parity = 0;
++ unsigned line_parity = 0;
++ unsigned line_parity_prime = 0;
++ unsigned char b;
++
++ for (i = 0; i < n_bytes; i++) {
++ b = column_parity_table[*data++];
++ col_parity ^= b;
++
++ if (b & 0x01) {
++ /* odd number of bits in the byte */
++ line_parity ^= i;
++ line_parity_prime ^= ~i;
++ }
++
++ }
++
++ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
++ ecc_other->line_parity = line_parity;
++ ecc_other->line_parity_prime = line_parity_prime;
++}
++
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *read_ecc,
++ const struct yaffs_ecc_other *test_ecc)
++{
++ unsigned char delta_col; /* column parity delta */
++ unsigned delta_line; /* line parity delta */
++ unsigned delta_line_prime; /* line parity delta */
++ unsigned bit;
++
++ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
++ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
++ delta_line_prime =
++ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
++
++ if ((delta_col | delta_line | delta_line_prime) == 0)
++ return 0; /* no error */
++
++ if (delta_line == ~delta_line_prime &&
++ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
++ /* Single bit (recoverable) error in data */
++
++ bit = 0;
++
++ if (delta_col & 0x20)
++ bit |= 0x04;
++ if (delta_col & 0x08)
++ bit |= 0x02;
++ if (delta_col & 0x02)
++ bit |= 0x01;
++
++ if (delta_line >= n_bytes)
++ return -1;
++
++ data[delta_line] ^= (1 << bit);
++
++ return 1; /* corrected */
++ }
++
++ if ((hweight32(delta_line) +
++ hweight32(delta_line_prime) +
++ hweight8(delta_col)) == 1) {
++ /* Recoverable error in ecc */
++
++ *read_ecc = *test_ecc;
++ return 1; /* corrected */
++ }
++
++ /* Unrecoverable error */
++
++ return -1;
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_ecc.h linux-3.14.4/fs/yaffs2/yaffs_ecc.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_ecc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_ecc.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/*
++ * This code implements the ECC algorithm used in SmartMedia.
++ *
++ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
++ * The two unused bits are set to 1.
++ * The ECC can correct single bit errors in a 256-byte page of data.
++ * Thus, two such ECC blocks are used on a 512-byte NAND page.
++ *
++ */
++
++#ifndef __YAFFS_ECC_H__
++#define __YAFFS_ECC_H__
++
++struct yaffs_ecc_other {
++ unsigned char col_parity;
++ unsigned line_parity;
++ unsigned line_parity_prime;
++};
++
++void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
++int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
++ const unsigned char *test_ecc);
++
++void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *ecc);
++int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
++ struct yaffs_ecc_other *read_ecc,
++ const struct yaffs_ecc_other *test_ecc);
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_getblockinfo.h linux-3.14.4/fs/yaffs2/yaffs_getblockinfo.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_getblockinfo.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_getblockinfo.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,35 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GETBLOCKINFO_H__
++#define __YAFFS_GETBLOCKINFO_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++
++/* Function to manipulate block info */
++static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
++ *dev, int blk)
++{
++ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>> yaffs: get_block_info block %d is not valid",
++ blk);
++ BUG();
++ }
++ return &dev->block_info[blk - dev->internal_start_block];
++}
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_guts.c linux-3.14.4/fs/yaffs2/yaffs_guts.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_guts.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_guts.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,5146 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++
++#include "yaffs_guts.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_tagsmarshall.h"
++#include "yaffs_nand.h"
++#include "yaffs_yaffs1.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_verify.h"
++#include "yaffs_nand.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_nameval.h"
++#include "yaffs_allocator.h"
++#include "yaffs_attribs.h"
++#include "yaffs_summary.h"
++
++/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
++#define YAFFS_GC_GOOD_ENOUGH 2
++#define YAFFS_GC_PASSIVE_THRESHOLD 4
++
++#include "yaffs_ecc.h"
++
++/* Forward declarations */
++
++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
++ const u8 *buffer, int n_bytes, int use_reserve);
++
++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
++ int buffer_size);
++
++/* Function to calculate chunk and offset */
++
++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
++ int *chunk_out, u32 *offset_out)
++{
++ int chunk;
++ u32 offset;
++
++ chunk = (u32) (addr >> dev->chunk_shift);
++
++ if (dev->chunk_div == 1) {
++ /* easy power of 2 case */
++ offset = (u32) (addr & dev->chunk_mask);
++ } else {
++ /* Non power-of-2 case */
++
++ loff_t chunk_base;
++
++ chunk /= dev->chunk_div;
++
++ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
++ offset = (u32) (addr - chunk_base);
++ }
++
++ *chunk_out = chunk;
++ *offset_out = offset;
++}
++
++/* Function to return the number of shifts for a power of 2 greater than or
++ * equal to the given number
++ * Note we don't try to cater for all possible numbers and this does not have to
++ * be hellishly efficient.
++ */
++
++static inline u32 calc_shifts_ceiling(u32 x)
++{
++ int extra_bits;
++ int shifts;
++
++ shifts = extra_bits = 0;
++
++ while (x > 1) {
++ if (x & 1)
++ extra_bits++;
++ x >>= 1;
++ shifts++;
++ }
++
++ if (extra_bits)
++ shifts++;
++
++ return shifts;
++}
++
++/* Function to return the number of shifts to get a 1 in bit 0
++ */
++
++static inline u32 calc_shifts(u32 x)
++{
++ u32 shifts;
++
++ shifts = 0;
++
++ if (!x)
++ return 0;
++
++ while (!(x & 1)) {
++ x >>= 1;
++ shifts++;
++ }
++
++ return shifts;
++}
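++
++/*
++ * Quick worked examples for the two helpers above:
++ * calc_shifts_ceiling(1000) == 10, since 2^10 = 1024 is the smallest power
++ * of two >= 1000, and calc_shifts_ceiling(512) == 9 exactly;
++ * calc_shifts(8) == 3 and calc_shifts(12) == 2, i.e. the position of the
++ * lowest set bit.
++ */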
++
++/*
++ * Temporary buffer manipulations.
++ */
++
++static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
++{
++ int i;
++ u8 *buf = (u8 *) 1;
++
++ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
++
++ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
++ dev->temp_buffer[i].in_use = 0;
++ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++ dev->temp_buffer[i].buffer = buf;
++ }
++
++ return buf ? YAFFS_OK : YAFFS_FAIL;
++}
++
++u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
++{
++ int i;
++
++ dev->temp_in_use++;
++ if (dev->temp_in_use > dev->max_temp)
++ dev->max_temp = dev->temp_in_use;
++
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ if (dev->temp_buffer[i].in_use == 0) {
++ dev->temp_buffer[i].in_use = 1;
++ return dev->temp_buffer[i].buffer;
++ }
++ }
++
++ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
++ /*
++ * If we got here then we have to allocate an unmanaged one
++ * This is not good.
++ */
++
++ dev->unmanaged_buffer_allocs++;
++ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
++
++}
++
++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
++{
++ int i;
++
++ dev->temp_in_use--;
++
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
++ if (dev->temp_buffer[i].buffer == buffer) {
++ dev->temp_buffer[i].in_use = 0;
++ return;
++ }
++ }
++
++ if (buffer) {
++ /* assume it is an unmanaged one. */
++ yaffs_trace(YAFFS_TRACE_BUFFERS,
++ "Releasing unmanaged temp buffer");
++ kfree(buffer);
++ dev->unmanaged_buffer_deallocs++;
++ }
++
++}
++
++/*
++ * Functions for robustisizing TODO
++ *
++ */
++
++static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
++ const u8 *data,
++ const struct yaffs_ext_tags *tags)
++{
++ (void) dev;
++ (void) nand_chunk;
++ (void) data;
++ (void) tags;
++}
++
++static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
++ const struct yaffs_ext_tags *tags)
++{
++ (void) dev;
++ (void) nand_chunk;
++ (void) tags;
++}
++
++void yaffs_handle_chunk_error(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi)
++{
++ if (!bi->gc_prioritise) {
++ bi->gc_prioritise = 1;
++ dev->has_pending_prioritised_gc = 1;
++ bi->chunk_error_strikes++;
++
++ if (bi->chunk_error_strikes > 3) {
++ bi->needs_retiring = 1; /* Too many strikes, so retire */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Block struck out");
++
++ }
++ }
++}
++
++static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
++ int erased_ok)
++{
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
++
++ yaffs_handle_chunk_error(dev, bi);
++
++ if (erased_ok) {
++ /* Was an actual write failure,
++ * so mark the block for retirement.*/
++ bi->needs_retiring = 1;
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>> Block %d needs retiring", flash_block);
++ }
++
++ /* Delete the chunk */
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ yaffs_skip_rest_of_block(dev);
++}
++
++/*
++ * Verification code
++ */
++
++/*
++ * Simple hash function. Needs to have a reasonable spread
++ */
++
++static inline int yaffs_hash_fn(int n)
++{
++ if (n < 0)
++ n = -n;
++ return n % YAFFS_NOBJECT_BUCKETS;
++}
++
++/*
++ * Access functions to useful fake objects.
++ * Note that root might have a presence in NAND if permissions are set.
++ */
++
++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
++{
++ return dev->root_dir;
++}
++
++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
++{
++ return dev->lost_n_found;
++}
++
++/*
++ * Erased NAND checking functions
++ */
++
++int yaffs_check_ff(u8 *buffer, int n_bytes)
++{
++ /* Horrible, slow implementation */
++ while (n_bytes--) {
++ if (*buffer != 0xff)
++ return 0;
++ buffer++;
++ }
++ return 1;
++}
++
++static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
++{
++ int retval = YAFFS_OK;
++ u8 *data = yaffs_get_temp_buffer(dev);
++ struct yaffs_ext_tags tags;
++ int result;
++
++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
++
++ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
++ retval = YAFFS_FAIL;
++
++ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
++ tags.chunk_used) {
++ yaffs_trace(YAFFS_TRACE_NANDACCESS,
++ "Chunk %d not erased", nand_chunk);
++ retval = YAFFS_FAIL;
++ }
++
++ yaffs_release_temp_buffer(dev, data);
++
++ return retval;
++
++}
++
++static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *data,
++ struct yaffs_ext_tags *tags)
++{
++ int retval = YAFFS_OK;
++ struct yaffs_ext_tags temp_tags;
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++ int result;
++
++ result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
++ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
++ temp_tags.obj_id != tags->obj_id ||
++ temp_tags.chunk_id != tags->chunk_id ||
++ temp_tags.n_bytes != tags->n_bytes)
++ retval = YAFFS_FAIL;
++
++ yaffs_release_temp_buffer(dev, buffer);
++
++ return retval;
++}
++
++
++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
++{
++ int reserved_chunks;
++ int reserved_blocks = dev->param.n_reserved_blocks;
++ int checkpt_blocks;
++
++ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
++
++ reserved_chunks =
++ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
++
++ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
++}
++
++static int yaffs_find_alloc_block(struct yaffs_dev *dev)
++{
++ int i;
++ struct yaffs_block_info *bi;
++
++ if (dev->n_erased_blocks < 1) {
++ /* Hoosterman we've got a problem.
++ * Can't get space to gc
++ */
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: no more erased blocks");
++
++ return -1;
++ }
++
++ /* Find an empty block. */
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ dev->alloc_block_finder++;
++ if (dev->alloc_block_finder < dev->internal_start_block
++ || dev->alloc_block_finder > dev->internal_end_block) {
++ dev->alloc_block_finder = dev->internal_start_block;
++ }
++
++ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->seq_number++;
++ bi->seq_number = dev->seq_number;
++ dev->n_erased_blocks--;
++ yaffs_trace(YAFFS_TRACE_ALLOCATE,
++ "Allocated block %d, seq %d, %d left" ,
++ dev->alloc_block_finder, dev->seq_number,
++ dev->n_erased_blocks);
++ return dev->alloc_block_finder;
++ }
++ }
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs tragedy: no more erased blocks, but there should have been %d",
++ dev->n_erased_blocks);
++
++ return -1;
++}
++
++static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
++ struct yaffs_block_info **block_ptr)
++{
++ int ret_val;
++ struct yaffs_block_info *bi;
++
++ if (dev->alloc_block < 0) {
++ /* Get next block to allocate off */
++ dev->alloc_block = yaffs_find_alloc_block(dev);
++ dev->alloc_page = 0;
++ }
++
++ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
++ /* No space unless we're allowed to use the reserve. */
++ return -1;
++ }
++
++ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
++ && dev->alloc_page == 0)
++ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
++
++ /* Next page please.... */
++ if (dev->alloc_block >= 0) {
++ bi = yaffs_get_block_info(dev, dev->alloc_block);
++
++ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
++ dev->alloc_page;
++ bi->pages_in_use++;
++ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
++
++ dev->alloc_page++;
++
++ dev->n_free_chunks--;
++
++ /* If the block is full set the state to full */
++ if (dev->alloc_page >= dev->param.chunks_per_block) {
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++
++ if (block_ptr)
++ *block_ptr = bi;
++
++ return ret_val;
++ }
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
++
++ return -1;
++}
++
++static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
++{
++ int n;
++
++ n = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ if (dev->alloc_block > 0)
++ n += (dev->param.chunks_per_block - dev->alloc_page);
++
++ return n;
++
++}
++
++/*
++ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
++ * if we don't want to write to it.
++ */
++void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
++{
++ struct yaffs_block_info *bi;
++
++ if (dev->alloc_block > 0) {
++ bi = yaffs_get_block_info(dev, dev->alloc_block);
++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++ }
++}
++
++static int yaffs_write_new_chunk(struct yaffs_dev *dev,
++ const u8 *data,
++ struct yaffs_ext_tags *tags, int use_reserver)
++{
++ int attempts = 0;
++ int write_ok = 0;
++ int chunk;
++
++ yaffs2_checkpt_invalidate(dev);
++
++ do {
++ struct yaffs_block_info *bi = 0;
++ int erased_ok = 0;
++
++ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
++ if (chunk < 0) {
++ /* no space */
++ break;
++ }
++
++ /* First check this chunk is erased, if it needs
++ * checking. The checking policy (unless forced
++ * always on) is as follows:
++ *
++ * Check the first page we try to write in a block.
++ * If the check passes then we don't need to check any
++ * more. If the check fails, we check again...
++ * If the block has been erased, we don't need to check.
++ *
++ * However, if the block has been prioritised for gc,
++ * then we think there might be something odd about
++ * this block and stop using it.
++ *
++ * Rationale: We should only ever see chunks that have
++ * not been erased if there was a partially written
++ * chunk due to power loss. This checking policy should
++ * catch that case with very few checks and thus save a
++ * lot of checks that are most likely not needed.
++ *
++ * Mods to the above
++ * If an erase check fails or the write fails we skip the
++ * rest of the block.
++ */
++
++ /* let's give it a try */
++ attempts++;
++
++ if (dev->param.always_check_erased)
++ bi->skip_erased_check = 0;
++
++ if (!bi->skip_erased_check) {
++ erased_ok = yaffs_check_chunk_erased(dev, chunk);
++ if (erased_ok != YAFFS_OK) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>> yaffs chunk %d was not erased",
++ chunk);
++
++ /* If not erased, delete this one,
++ * skip rest of block and
++ * try another chunk */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++ yaffs_skip_rest_of_block(dev);
++ continue;
++ }
++ }
++
++ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
++
++ if (!bi->skip_erased_check)
++ write_ok =
++ yaffs_verify_chunk_written(dev, chunk, data, tags);
++
++ if (write_ok != YAFFS_OK) {
++ /* Clean up aborted write, skip to next block and
++ * try another chunk */
++ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
++ continue;
++ }
++
++ bi->skip_erased_check = 1;
++
++ /* Copy the data into the robustification buffer */
++ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
++
++ } while (write_ok != YAFFS_OK &&
++ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
++
++ if (!write_ok)
++ chunk = -1;
++
++ if (attempts > 1) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>> yaffs write required %d attempts",
++ attempts);
++ dev->n_retried_writes += (attempts - 1);
++ }
++
++ return chunk;
++}
++
++/*
++ * Block retiring for handling a broken block.
++ */
++
++static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
++{
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
++
++ yaffs2_checkpt_invalidate(dev);
++
++ yaffs2_clear_oldest_dirty_seq(dev, bi);
++
++ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
++ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Failed to mark bad and erase block %d",
++ flash_block);
++ } else {
++ struct yaffs_ext_tags tags;
++ int chunk_id =
++ flash_block * dev->param.chunks_per_block;
++
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++
++ memset(buffer, 0xff, dev->data_bytes_per_chunk);
++ memset(&tags, 0, sizeof(tags));
++ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
++ if (dev->tagger.write_chunk_tags_fn(dev, chunk_id -
++ dev->chunk_offset,
++ buffer,
++ &tags) != YAFFS_OK)
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Failed to write bad block marker to block %d",
++ flash_block);
++
++ yaffs_release_temp_buffer(dev, buffer);
++ }
++ }
++
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++ bi->gc_prioritise = 0;
++ bi->needs_retiring = 0;
++
++ dev->n_retired_blocks++;
++}
++
++/*---------------- Name handling functions ------------*/
++
++static u16 yaffs_calc_name_sum(const YCHAR *name)
++{
++ u16 sum = 0;
++ u16 i = 1;
++
++ if (!name)
++ return 0;
++
++ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
++
++ /* 0x1f mask is case insensitive */
++ sum += ((*name) & 0x1f) * i;
++ i++;
++ name++;
++ }
++ return sum;
++}
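++
++/*
++ * Example: for the name "abc" the sum is (0x61 & 0x1f) * 1 +
++ * (0x62 & 0x1f) * 2 + (0x63 & 0x1f) * 3 = 1 + 4 + 9 = 14, and "ABC" gives
++ * the same value because of the 0x1f mask.  The sum is stored in the
++ * object (see yaffs_set_obj_name() below) so a directory search can
++ * reject non-matching names cheaply before comparing them in full.
++ */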
++
++
++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
++{
++ memset(obj->short_name, 0, sizeof(obj->short_name));
++
++ if (name && !name[0]) {
++ yaffs_fix_null_name(obj, obj->short_name,
++ YAFFS_SHORT_NAME_LENGTH);
++ name = obj->short_name;
++ } else if (name &&
++ strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
++ YAFFS_SHORT_NAME_LENGTH) {
++ strcpy(obj->short_name, name);
++ }
++
++ obj->sum = yaffs_calc_name_sum(name);
++}
++
++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
++ const struct yaffs_obj_hdr *oh)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
++ memset(tmp_name, 0, sizeof(tmp_name));
++ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
++ YAFFS_MAX_NAME_LENGTH + 1);
++ yaffs_set_obj_name(obj, tmp_name);
++#else
++ yaffs_set_obj_name(obj, oh->name);
++#endif
++}
++
++loff_t yaffs_max_file_size(struct yaffs_dev *dev)
++{
++ if (sizeof(loff_t) < 8)
++ return YAFFS_MAX_FILE_SIZE_32;
++ else
++ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
++}
++
++/*-------------------- TNODES -------------------
++
++ * List of spare tnodes
++ * The list is hooked together using the first pointer
++ * in the tnode.
++ */
++
++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
++{
++ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
++
++ if (tn) {
++ memset(tn, 0, dev->tnode_size);
++ dev->n_tnodes++;
++ }
++
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++
++ return tn;
++}
++
++/* FreeTnode frees up a tnode and puts it back on the free list */
++static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
++{
++ yaffs_free_raw_tnode(dev, tn);
++ dev->n_tnodes--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++}
++
++static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
++{
++ yaffs_deinit_raw_tnodes_and_objs(dev);
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
++}
++
++static void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++ unsigned pos, unsigned val)
++{
++ u32 *map = (u32 *) tn;
++ u32 bit_in_map;
++ u32 bit_in_word;
++ u32 word_in_map;
++ u32 mask;
++
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++ val >>= dev->chunk_grp_bits;
++
++ bit_in_map = pos * dev->tnode_width;
++ word_in_map = bit_in_map / 32;
++ bit_in_word = bit_in_map & (32 - 1);
++
++ mask = dev->tnode_mask << bit_in_word;
++
++ map[word_in_map] &= ~mask;
++ map[word_in_map] |= (mask & (val << bit_in_word));
++
++ if (dev->tnode_width > (32 - bit_in_word)) {
++ bit_in_word = (32 - bit_in_word);
++ word_in_map++;
++ mask =
++ dev->tnode_mask >> bit_in_word;
++ map[word_in_map] &= ~mask;
++ map[word_in_map] |= (mask & (val >> bit_in_word));
++ }
++}
++
++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++ unsigned pos)
++{
++ u32 *map = (u32 *) tn;
++ u32 bit_in_map;
++ u32 bit_in_word;
++ u32 word_in_map;
++ u32 val;
++
++ pos &= YAFFS_TNODES_LEVEL0_MASK;
++
++ bit_in_map = pos * dev->tnode_width;
++ word_in_map = bit_in_map / 32;
++ bit_in_word = bit_in_map & (32 - 1);
++
++ val = map[word_in_map] >> bit_in_word;
++
++ if (dev->tnode_width > (32 - bit_in_word)) {
++ bit_in_word = (32 - bit_in_word);
++ word_in_map++;
++ val |= (map[word_in_map] << bit_in_word);
++ }
++
++ val &= dev->tnode_mask;
++ val <<= dev->chunk_grp_bits;
++
++ return val;
++}
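++
++/*
++ * Worked example of the bit packing above: suppose tnode_width is 20 and
++ * pos is 3.  Then bit_in_map = 60, so word_in_map = 1 and bit_in_word = 28:
++ * the low 4 bits of the value land in bits 28..31 of map[1] and, because
++ * 20 > (32 - 28), the remaining 16 bits spill into bits 0..15 of map[2].
++ * yaffs_get_group_base() reverses exactly the same split.
++ */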
++
++/* ------------------- End of individual tnode manipulation -----------------*/
++
++/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
++ * The look up tree is represented by the top tnode and the number of top_level
++ * in the tree. 0 means only the level 0 tnode is in the tree.
++ */
++
++/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id)
++{
++ struct yaffs_tnode *tn = file_struct->top;
++ u32 i;
++ int required_depth;
++ int level = file_struct->top_level;
++
++ (void) dev;
++
++ /* Check sane level and chunk Id */
++ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
++
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
++
++ /* First check we're tall enough (ie enough top_level) */
++
++ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ required_depth = 0;
++ while (i) {
++ i >>= YAFFS_TNODES_INTERNAL_BITS;
++ required_depth++;
++ }
++
++ if (required_depth > file_struct->top_level)
++ return NULL; /* Not tall enough, so we can't find it */
++
++ /* Traverse down to level 0 */
++ while (level > 0 && tn) {
++ tn = tn->internal[(chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (level - 1) *
++ YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK];
++ level--;
++ }
++
++ return tn;
++}
++
++/* add_find_tnode_0 finds the level 0 tnode if it exists,
++ * otherwise first expands the tree.
++ * This happens in two steps:
++ * 1. If the tree isn't tall enough, then make it taller.
++ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
++ *
++ * Used when modifying the tree.
++ *
++ * If the tn argument is NULL, then a fresh tnode will be added, otherwise the
++ * specified tn will be plugged into the tree.
++ */
++
++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id,
++ struct yaffs_tnode *passed_tn)
++{
++ int required_depth;
++ int i;
++ int l;
++ struct yaffs_tnode *tn;
++ u32 x;
++
++ /* Check sane level and page Id */
++ if (file_struct->top_level < 0 ||
++ file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
++ return NULL;
++
++ if (chunk_id > YAFFS_MAX_CHUNK_ID)
++ return NULL;
++
++ /* First check we're tall enough (ie enough top_level) */
++
++ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
++ required_depth = 0;
++ while (x) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ required_depth++;
++ }
++
++ if (required_depth > file_struct->top_level) {
++ /* Not tall enough, gotta make the tree taller */
++ for (i = file_struct->top_level; i < required_depth; i++) {
++
++ tn = yaffs_get_tnode(dev);
++
++ if (tn) {
++ tn->internal[0] = file_struct->top;
++ file_struct->top = tn;
++ file_struct->top_level++;
++ } else {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs: no more tnodes");
++ return NULL;
++ }
++ }
++ }
++
++ /* Traverse down to level 0, adding anything we need */
++
++ l = file_struct->top_level;
++ tn = file_struct->top;
++
++ if (l > 0) {
++ while (l > 0 && tn) {
++ x = (chunk_id >>
++ (YAFFS_TNODES_LEVEL0_BITS +
++ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
++ YAFFS_TNODES_INTERNAL_MASK;
++
++ if ((l > 1) && !tn->internal[x]) {
++ /* Add missing non-level-zero tnode */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if (!tn->internal[x])
++ return NULL;
++ } else if (l == 1) {
++ /* Looking from level 1 at level 0 */
++ if (passed_tn) {
++ /* If we already have one, release it */
++ if (tn->internal[x])
++ yaffs_free_tnode(dev,
++ tn->internal[x]);
++ tn->internal[x] = passed_tn;
++
++ } else if (!tn->internal[x]) {
++ /* Don't have one, none passed in */
++ tn->internal[x] = yaffs_get_tnode(dev);
++ if (!tn->internal[x])
++ return NULL;
++ }
++ }
++
++ tn = tn->internal[x];
++ l--;
++ }
++ } else {
++ /* top is level 0 */
++ if (passed_tn) {
++ memcpy(tn, passed_tn,
++ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
++ yaffs_free_tnode(dev, passed_tn);
++ }
++ }
++
++ return tn;
++}
++
++static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
++ int chunk_obj)
++{
++ return (tags->chunk_id == chunk_obj &&
++ tags->obj_id == obj_id &&
++ !tags->is_deleted) ? 1 : 0;
++
++}
++
++static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
++ struct yaffs_ext_tags *tags, int obj_id,
++ int inode_chunk)
++{
++ int j;
++
++ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
++ if (yaffs_check_chunk_bit
++ (dev, the_chunk / dev->param.chunks_per_block,
++ the_chunk % dev->param.chunks_per_block)) {
++
++ if (dev->chunk_grp_size == 1)
++ return the_chunk;
++ else {
++ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
++ tags);
++ if (yaffs_tags_match(tags,
++ obj_id, inode_chunk)) {
++ /* found it; */
++ return the_chunk;
++ }
++ }
++ }
++ the_chunk++;
++ }
++ return -1;
++}
++
++int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ struct yaffs_ext_tags *tags)
++{
++ /* Get the Tnode, then get the level 0 chunk offset */
++ struct yaffs_tnode *tn;
++ int the_chunk = -1;
++ struct yaffs_ext_tags local_tags;
++ int ret_val = -1;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &local_tags;
++ }
++
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
++
++ if (!tn)
++ return ret_val;
++
++ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
++ inode_chunk);
++ return ret_val;
++}
++
++static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
++ struct yaffs_ext_tags *tags)
++{
++ /* Get the Tnode, then get the level 0 chunk offset */
++ struct yaffs_tnode *tn;
++ int the_chunk = -1;
++ struct yaffs_ext_tags local_tags;
++ struct yaffs_dev *dev = in->my_dev;
++ int ret_val = -1;
++
++ if (!tags) {
++ /* Passed a NULL, so use our own tags space */
++ tags = &local_tags;
++ }
++
++ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
++
++ if (!tn)
++ return ret_val;
++
++ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
++ inode_chunk);
++
++ /* Delete the entry in the filestructure (if found) */
++ if (ret_val != -1)
++ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
++
++ return ret_val;
++}
++
++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ int nand_chunk, int in_scan)
++{
++ /* NB in_scan is zero unless scanning.
++ * For forward scanning, in_scan is > 0;
++ * for backward scanning in_scan is < 0
++ *
++ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
++ */
++
++ struct yaffs_tnode *tn;
++ struct yaffs_dev *dev = in->my_dev;
++ int existing_cunk;
++ struct yaffs_ext_tags existing_tags;
++ struct yaffs_ext_tags new_tags;
++ unsigned existing_serial, new_serial;
++
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
++ /* Just ignore an attempt at putting a chunk into a non-file
++ * during scanning.
++ * If it is not during scanning then something went wrong!
++ */
++ if (!in_scan) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy:attempt to put data chunk into a non-file"
++ );
++ BUG();
++ }
++
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ return YAFFS_OK;
++ }
++
++ tn = yaffs_add_find_tnode_0(dev,
++ &in->variant.file_variant,
++ inode_chunk, NULL);
++ if (!tn)
++ return YAFFS_FAIL;
++
++ if (!nand_chunk)
++ /* Dummy insert, bail now */
++ return YAFFS_OK;
++
++ existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
++
++ if (in_scan != 0) {
++ /* If we're scanning then we need to test for duplicates
++ * NB This does not need to be efficient since it should only
++ * happen when the power fails during a write, then only one
++ * chunk should ever be affected.
++ *
++ * Correction for YAFFS2: This could happen quite a lot and we
++ * need to think about efficiency! TODO
++ * Update: For backward scanning we don't need to re-read tags
++ * so this is quite cheap.
++ */
++
++ if (existing_cunk > 0) {
++ /* NB Right now existing chunk will not be real
++ * chunk_id if the chunk group size > 1
++ * thus we have to do a FindChunkInFile to get the
++ * real chunk id.
++ *
++ * We have a duplicate now we need to decide which
++ * one to use:
++ *
++ * Backwards scanning YAFFS2: The old one is what
++ * we use, dump the new one.
++ * YAFFS1: Get both sets of tags and compare serial
++ * numbers.
++ */
++
++ if (in_scan > 0) {
++ /* Only do this for forward scanning */
++ yaffs_rd_chunk_tags_nand(dev,
++ nand_chunk,
++ NULL, &new_tags);
++
++ /* Do a proper find */
++ existing_cunk =
++ yaffs_find_chunk_in_file(in, inode_chunk,
++ &existing_tags);
++ }
++
++ if (existing_cunk <= 0) {
++ /*Hoosterman - how did this happen? */
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: existing chunk < 0 in scan"
++ );
++
++ }
++
++ /* NB The deleted flags should be false, otherwise
++ * the chunks will not be loaded during a scan
++ */
++
++ if (in_scan > 0) {
++ new_serial = new_tags.serial_number;
++ existing_serial = existing_tags.serial_number;
++ }
++
++ if ((in_scan > 0) &&
++ (existing_cunk <= 0 ||
++ ((existing_serial + 1) & 3) == new_serial)) {
++ /* Forward scanning.
++ * Use new
++ * Delete the old one and drop through to
++ * update the tnode
++ */
++ yaffs_chunk_del(dev, existing_cunk, 1,
++ __LINE__);
++ } else {
++ /* Backward scanning or we want to use the
++ * existing one
++ * Delete the new one and return early so that
++ * the tnode isn't changed
++ */
++ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
++ return YAFFS_OK;
++ }
++ }
++
++ }
++
++ if (existing_cunk == 0)
++ in->n_data_chunks++;
++
++ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
++
++ return YAFFS_OK;
++}
++
++static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
++{
++ struct yaffs_block_info *the_block;
++ unsigned block_no;
++
++ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
++
++ block_no = chunk / dev->param.chunks_per_block;
++ the_block = yaffs_get_block_info(dev, block_no);
++ if (the_block) {
++ the_block->soft_del_pages++;
++ dev->n_free_chunks++;
++ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
++ }
++}
++
++/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
++ * the chunks in the file.
++ * All soft deleting does is increment the block's softdelete count and pulls
++ * the chunk out of the tnode.
++ * Thus, essentially this is the same as DeleteWorker except that the chunks
++ * are soft deleted.
++ */
++
++static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
++ u32 level, int chunk_offset)
++{
++ int i;
++ int the_chunk;
++ int all_done = 1;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (!tn)
++ return 1;
++
++ if (level > 0) {
++ for (i = YAFFS_NTNODES_INTERNAL - 1;
++ all_done && i >= 0;
++ i--) {
++ if (tn->internal[i]) {
++ all_done =
++ yaffs_soft_del_worker(in,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset <<
++ YAFFS_TNODES_INTERNAL_BITS)
++ + i);
++ if (all_done) {
++ yaffs_free_tnode(dev,
++ tn->internal[i]);
++ tn->internal[i] = NULL;
++ } else {
++ /* Can this happen? */
++ }
++ }
++ }
++ return (all_done) ? 1 : 0;
++ }
++
++ /* level 0 */
++ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
++ the_chunk = yaffs_get_group_base(dev, tn, i);
++ if (the_chunk) {
++ yaffs_soft_del_chunk(dev, the_chunk);
++ yaffs_load_tnode_0(dev, tn, i, 0);
++ }
++ }
++ return 1;
++}
++
++static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ struct yaffs_obj *parent;
++
++ yaffs_verify_obj_in_dir(obj);
++ parent = obj->parent;
++
++ yaffs_verify_dir(parent);
++
++ if (dev && dev->param.remove_obj_fn)
++ dev->param.remove_obj_fn(obj);
++
++ list_del_init(&obj->siblings);
++ obj->parent = NULL;
++
++ yaffs_verify_dir(parent);
++}
++
++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
++{
++ if (!directory) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: Trying to add an object to a null pointer directory"
++ );
++ BUG();
++ return;
++ }
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: Trying to add an object to a non-directory"
++ );
++ BUG();
++ }
++
++ if (obj->siblings.prev == NULL) {
++ /* Not initialised */
++ BUG();
++ }
++
++ yaffs_verify_dir(directory);
++
++ yaffs_remove_obj_from_dir(obj);
++
++ /* Now add it */
++ list_add(&obj->siblings, &directory->variant.dir_variant.children);
++ obj->parent = directory;
++
++ if (directory == obj->my_dev->unlinked_dir
++ || directory == obj->my_dev->del_dir) {
++ obj->unlinked = 1;
++ obj->my_dev->n_unlinked_files++;
++ obj->rename_allowed = 0;
++ }
++
++ yaffs_verify_dir(directory);
++ yaffs_verify_obj_in_dir(obj);
++}
++
++static int yaffs_change_obj_name(struct yaffs_obj *obj,
++ struct yaffs_obj *new_dir,
++ const YCHAR *new_name, int force, int shadows)
++{
++ int unlink_op;
++ int del_op;
++ struct yaffs_obj *existing_target;
++
++ if (new_dir == NULL)
++ new_dir = obj->parent; /* use the old directory */
++
++ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
++ );
++ BUG();
++ }
++
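++	/* unlink_op/del_op flag whether the destination is one of the
++	 * internal unlinked/deleted pseudo-directories, where duplicate
++	 * names are acceptable.
++	 */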
++ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
++ del_op = (new_dir == obj->my_dev->del_dir);
++
++ existing_target = yaffs_find_by_name(new_dir, new_name);
++
++ /* If the object is a file going into the unlinked directory,
++ * then it is OK to just stuff it in since duplicate names are OK.
++ * else only proceed if the new name does not exist and we're putting
++ * it into a directory.
++ */
++ if (!(unlink_op || del_op || force ||
++ shadows > 0 || !existing_target) ||
++ new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ return YAFFS_FAIL;
++
++ yaffs_set_obj_name(obj, new_name);
++ obj->dirty = 1;
++ yaffs_add_obj_to_dir(new_dir, obj);
++
++ if (unlink_op)
++ obj->unlinked = 1;
++
++ /* If it is a deletion then we mark it as a shrink for gc */
++ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
++ return YAFFS_OK;
++
++ return YAFFS_FAIL;
++}
++
++/*------------------------ Short Operations Cache ------------------------------
++ * In many situations where there is no high level buffering a lot of
++ * reads might be short sequential reads, and a lot of writes may be short
++ * sequential writes. eg. scanning/writing a jpeg file.
++ * In these cases, a short read/write cache can provide a huge performance
++ * benefit with dumb-as-a-rock code.
++ * In Linux, the page cache provides read buffering and the short op cache
++ * provides write buffering.
++ *
++ * There are a small number (~10) of cache chunks per device so that we don't
++ * need a very intelligent search.
++ */
++
++static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ int i;
++ struct yaffs_cache *cache;
++ int n_caches = obj->my_dev->param.n_caches;
++
++ for (i = 0; i < n_caches; i++) {
++ cache = &dev->cache[i];
++ if (cache->object == obj && cache->dirty)
++ return 1;
++ }
++
++ return 0;
++}
++
++static void yaffs_flush_file_cache(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ int lowest = -99; /* Stop compiler whining. */
++ int i;
++ struct yaffs_cache *cache;
++ int chunk_written = 0;
++ int n_caches = obj->my_dev->param.n_caches;
++
++ if (n_caches < 1)
++ return;
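++	/* Write out this object's dirty cache entries in ascending
++	 * chunk_id order; stop when none remain or a write fails.
++	 */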
++ do {
++ cache = NULL;
++
++ /* Find the lowest dirty chunk for this object */
++ for (i = 0; i < n_caches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].dirty) {
++ if (!cache ||
++ dev->cache[i].chunk_id < lowest) {
++ cache = &dev->cache[i];
++ lowest = cache->chunk_id;
++ }
++ }
++ }
++
++ if (cache && !cache->locked) {
++ /* Write it out and free it up */
++ chunk_written =
++ yaffs_wr_data_obj(cache->object,
++ cache->chunk_id,
++ cache->data,
++ cache->n_bytes, 1);
++ cache->dirty = 0;
++ cache->object = NULL;
++ }
++ } while (cache && chunk_written > 0);
++
++ if (cache)
++ /* Hoosterman, disk full while writing cache out. */
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: no space during cache write");
++}
++
++/* yaffs_flush_whole_cache(dev)
++ *
++ * Flush the file cache for every object that still has dirty cached chunks.
++ */
++
++void yaffs_flush_whole_cache(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ int n_caches = dev->param.n_caches;
++ int i;
++
++ /* Find a dirty object in the cache and flush it...
++ * until there are no further dirty objects.
++ */
++ do {
++ obj = NULL;
++ for (i = 0; i < n_caches && !obj; i++) {
++ if (dev->cache[i].object && dev->cache[i].dirty)
++ obj = dev->cache[i].object;
++ }
++ if (obj)
++ yaffs_flush_file_cache(obj);
++ } while (obj);
++
++}
++
++/* Grab us a cache chunk for use.
++ * First look for an empty one.
++ * Then look for the least recently used non-dirty one.
++ * Then look for the least recently used dirty one, flush it and look again.
++ */
++static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
++{
++ int i;
++
++ if (dev->param.n_caches > 0) {
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (!dev->cache[i].object)
++ return &dev->cache[i];
++ }
++ }
++ return NULL;
++}
++
++static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
++{
++ struct yaffs_cache *cache;
++ struct yaffs_obj *the_obj;
++ int usage;
++ int i;
++ int pushout;
++
++ if (dev->param.n_caches < 1)
++ return NULL;
++
++ /* Try find a non-dirty one... */
++
++ cache = yaffs_grab_chunk_worker(dev);
++
++ if (!cache) {
++ /* They were all dirty, find the LRU object and flush
++ * its cache, then find again.
++ * NB what's here is not very accurate,
++ * we actually flush the object with the LRU chunk.
++ */
++
++ /* With locking we can't assume we can use entry zero,
++ * Set the_obj to a valid pointer for Coverity. */
++ the_obj = dev->cache[0].object;
++ usage = -1;
++ cache = NULL;
++ pushout = -1;
++
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object &&
++ !dev->cache[i].locked &&
++ (dev->cache[i].last_use < usage ||
++ !cache)) {
++ usage = dev->cache[i].last_use;
++ the_obj = dev->cache[i].object;
++ cache = &dev->cache[i];
++ pushout = i;
++ }
++ }
++
++ if (!cache || cache->dirty) {
++ /* Flush and try again */
++ yaffs_flush_file_cache(the_obj);
++ cache = yaffs_grab_chunk_worker(dev);
++ }
++ }
++ return cache;
++}
++
++/* Find a cached chunk */
++static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
++ int chunk_id)
++{
++ struct yaffs_dev *dev = obj->my_dev;
++ int i;
++
++ if (dev->param.n_caches < 1)
++ return NULL;
++
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == obj &&
++ dev->cache[i].chunk_id == chunk_id) {
++ dev->cache_hits++;
++
++ return &dev->cache[i];
++ }
++ }
++ return NULL;
++}
++
++/* Mark the chunk for the least recently used algorithm */
++static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
++ int is_write)
++{
++ int i;
++
++ if (dev->param.n_caches < 1)
++ return;
++
++ if (dev->cache_last_use < 0 ||
++ dev->cache_last_use > 100000000) {
++ /* Reset the cache usages */
++ for (i = 1; i < dev->param.n_caches; i++)
++ dev->cache[i].last_use = 0;
++
++ dev->cache_last_use = 0;
++ }
++ dev->cache_last_use++;
++ cache->last_use = dev->cache_last_use;
++
++ if (is_write)
++ cache->dirty = 1;
++}
++
++/* Invalidate a single cache page.
++ * Do this when a whole page gets written,
++ * ie the short cache for this page is no longer valid.
++ */
++static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
++{
++ struct yaffs_cache *cache;
++
++ if (object->my_dev->param.n_caches > 0) {
++ cache = yaffs_find_chunk_cache(object, chunk_id);
++
++ if (cache)
++ cache->object = NULL;
++ }
++}
++
++/* Invalidate all the cache pages associated with this object
++ * Do this whenever the file is deleted or resized.
++ */
++static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
++{
++ int i;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (dev->param.n_caches > 0) {
++ /* Invalidate it. */
++ for (i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].object == in)
++ dev->cache[i].object = NULL;
++ }
++ }
++}
++
++static void yaffs_unhash_obj(struct yaffs_obj *obj)
++{
++ int bucket;
++ struct yaffs_dev *dev = obj->my_dev;
++
++ /* If it is still linked into the bucket list, free from the list */
++ if (!list_empty(&obj->hash_link)) {
++ list_del_init(&obj->hash_link);
++ bucket = yaffs_hash_fn(obj->obj_id);
++ dev->obj_bucket[bucket].count--;
++ }
++}
++
++/* FreeObject frees up an object and puts it back on the free list */
++static void yaffs_free_obj(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev;
++
++ if (!obj) {
++ BUG();
++ return;
++ }
++ dev = obj->my_dev;
++ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
++ obj, obj->my_inode);
++ if (obj->parent)
++ BUG();
++ if (!list_empty(&obj->siblings))
++ BUG();
++
++ if (obj->my_inode) {
++ /* We're still hooked up to a cached inode.
++ * Don't delete now, but mark for later deletion
++ */
++ obj->defered_free = 1;
++ return;
++ }
++
++ yaffs_unhash_obj(obj);
++
++ yaffs_free_raw_obj(dev, obj);
++ dev->n_obj--;
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++}
++
++void yaffs_handle_defered_free(struct yaffs_obj *obj)
++{
++ if (obj->defered_free)
++ yaffs_free_obj(obj);
++}
++
++static int yaffs_generic_obj_del(struct yaffs_obj *in)
++{
++	/* Invalidate the file's data in the cache, without flushing. */
++ yaffs_invalidate_whole_cache(in);
++
++ if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
++ /* Move to unlinked directory so we have a deletion record */
++ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
++ 0);
++ }
++
++ yaffs_remove_obj_from_dir(in);
++ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
++ in->hdr_chunk = 0;
++
++ yaffs_free_obj(in);
++ return YAFFS_OK;
++
++}
++
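++/* Soft deletion: the file's data chunks are left on NAND, but each
++ * containing block's soft_del_pages count is raised and the chunks are
++ * pulled out of the tnode tree, so the garbage collector can reclaim
++ * the space later.
++ */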
++static void yaffs_soft_del_file(struct yaffs_obj *obj)
++{
++ if (!obj->deleted ||
++ obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
++ obj->soft_del)
++ return;
++
++ if (obj->n_data_chunks <= 0) {
++ /* Empty file with no duplicate object headers,
++ * just delete it immediately */
++ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
++ obj->variant.file_variant.top = NULL;
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: Deleting empty file %d",
++ obj->obj_id);
++ yaffs_generic_obj_del(obj);
++ } else {
++ yaffs_soft_del_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.
++ file_variant.top_level, 0);
++ obj->soft_del = 1;
++ }
++}
++
++/* Pruning removes any part of the file structure tree that is beyond the
++ * bounds of the file (ie that does not point to chunks).
++ *
++ * A file should only get pruned when its size is reduced.
++ *
++ * Before pruning, the chunks must be pulled from the tree and the
++ * level 0 tnode entries must be zeroed out.
++ * Could also use this for file deletion, but that's probably better handled
++ * by a special case.
++ *
++ * This function is recursive. For levels > 0 the function is called again on
++ * any sub-tree. For level == 0 we just check if the sub-tree has data.
++ * If there is no data in a subtree then it is pruned.
++ */
++
++static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
++ struct yaffs_tnode *tn, u32 level,
++ int del0)
++{
++ int i;
++ int has_data;
++
++ if (!tn)
++ return tn;
++
++ has_data = 0;
++
++ if (level > 0) {
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i]) {
++ tn->internal[i] =
++ yaffs_prune_worker(dev,
++ tn->internal[i],
++ level - 1,
++ (i == 0) ? del0 : 1);
++ }
++
++ if (tn->internal[i])
++ has_data++;
++ }
++ } else {
++ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
++ u32 *map = (u32 *) tn;
++
++ for (i = 0; !has_data && i < tnode_size_u32; i++) {
++ if (map[i])
++ has_data++;
++ }
++ }
++
++ if (has_data == 0 && del0) {
++ /* Free and return NULL */
++ yaffs_free_tnode(dev, tn);
++ tn = NULL;
++ }
++ return tn;
++}
++
++static int yaffs_prune_tree(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct)
++{
++ int i;
++ int has_data;
++ int done = 0;
++ struct yaffs_tnode *tn;
++
++ if (file_struct->top_level < 1)
++ return YAFFS_OK;
++
++ file_struct->top =
++ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
++
++ /* Now we have a tree with all the non-zero branches NULL but
++ * the height is the same as it was.
++ * Let's see if we can trim internal tnodes to shorten the tree.
++ * We can do this if only the 0th element in the tnode is in use
++ * (ie all the non-zero are NULL)
++ */
++
++ while (file_struct->top_level && !done) {
++ tn = file_struct->top;
++
++ has_data = 0;
++ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
++ if (tn->internal[i])
++ has_data++;
++ }
++
++ if (!has_data) {
++ file_struct->top = tn->internal[0];
++ file_struct->top_level--;
++ yaffs_free_tnode(dev, tn);
++ } else {
++ done = 1;
++ }
++ }
++
++ return YAFFS_OK;
++}
++
++/*-------------------- End of File Structure functions.-------------------*/
++
++/* alloc_empty_obj gets us a clean object. */
++static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
++
++ if (!obj)
++ return obj;
++
++ dev->n_obj++;
++
++ /* Now sweeten it up... */
++
++ memset(obj, 0, sizeof(struct yaffs_obj));
++ obj->being_created = 1;
++
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0;
++ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
++ INIT_LIST_HEAD(&(obj->hard_links));
++ INIT_LIST_HEAD(&(obj->hash_link));
++ INIT_LIST_HEAD(&obj->siblings);
++
++ /* Now make the directory sane */
++ if (dev->root_dir) {
++ obj->parent = dev->root_dir;
++ list_add(&(obj->siblings),
++ &dev->root_dir->variant.dir_variant.children);
++ }
++
++ /* Add it to the lost and found directory.
++ * NB Can't put root or lost-n-found in lost-n-found so
++ * check if lost-n-found exists first
++ */
++ if (dev->lost_n_found)
++ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++
++ obj->being_created = 0;
++
++ dev->checkpoint_blocks_required = 0; /* force recalculation */
++
++ return obj;
++}
++
++static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
++{
++ int i;
++ int l = 999;
++ int lowest = 999999;
++
++ /* Search for the shortest list or one that
++ * isn't too long.
++ */
++
++ for (i = 0; i < 10 && lowest > 4; i++) {
++ dev->bucket_finder++;
++ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
++ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
++ lowest = dev->obj_bucket[dev->bucket_finder].count;
++ l = dev->bucket_finder;
++ }
++ }
++
++ return l;
++}
++
++static int yaffs_new_obj_id(struct yaffs_dev *dev)
++{
++ int bucket = yaffs_find_nice_bucket(dev);
++ int found = 0;
++ struct list_head *i;
++ u32 n = (u32) bucket;
++
++ /* Now find an object value that has not already been taken
++ * by scanning the list.
++ */
++
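++	/* Candidate ids are bucket + k * YAFFS_NOBJECT_BUCKETS, so every
++	 * candidate hashes back to the chosen bucket and only that one
++	 * bucket's list needs to be searched for a collision.
++	 */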
++ while (!found) {
++ found = 1;
++ n += YAFFS_NOBJECT_BUCKETS;
++ if (1 || dev->obj_bucket[bucket].count > 0) {
++ list_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* If there is already one in the list */
++ if (i && list_entry(i, struct yaffs_obj,
++ hash_link)->obj_id == n) {
++ found = 0;
++ }
++ }
++ }
++ }
++ return n;
++}
++
++static void yaffs_hash_obj(struct yaffs_obj *in)
++{
++ int bucket = yaffs_hash_fn(in->obj_id);
++ struct yaffs_dev *dev = in->my_dev;
++
++ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
++ dev->obj_bucket[bucket].count++;
++}
++
++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
++{
++ int bucket = yaffs_hash_fn(number);
++ struct list_head *i;
++ struct yaffs_obj *in;
++
++ list_for_each(i, &dev->obj_bucket[bucket].list) {
++ /* Look if it is in the list */
++ in = list_entry(i, struct yaffs_obj, hash_link);
++ if (in->obj_id == number) {
++ /* Don't show if it is defered free */
++ if (in->defered_free)
++ return NULL;
++ return in;
++ }
++ }
++
++ return NULL;
++}
++
++static struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
++ enum yaffs_obj_type type)
++{
++ struct yaffs_obj *the_obj = NULL;
++ struct yaffs_tnode *tn = NULL;
++
++ if (number < 0)
++ number = yaffs_new_obj_id(dev);
++
++ if (type == YAFFS_OBJECT_TYPE_FILE) {
++ tn = yaffs_get_tnode(dev);
++ if (!tn)
++ return NULL;
++ }
++
++ the_obj = yaffs_alloc_empty_obj(dev);
++ if (!the_obj) {
++ if (tn)
++ yaffs_free_tnode(dev, tn);
++ return NULL;
++ }
++
++ the_obj->fake = 0;
++ the_obj->rename_allowed = 1;
++ the_obj->unlink_allowed = 1;
++ the_obj->obj_id = number;
++ yaffs_hash_obj(the_obj);
++ the_obj->variant_type = type;
++ yaffs_load_current_time(the_obj, 1, 1);
++
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ the_obj->variant.file_variant.file_size = 0;
++ the_obj->variant.file_variant.scanned_size = 0;
++ the_obj->variant.file_variant.shrink_size =
++ yaffs_max_file_size(dev);
++ the_obj->variant.file_variant.top_level = 0;
++ the_obj->variant.file_variant.top = tn;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
++ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* No action required */
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++		/* TODO: this should not happen */
++ break;
++ }
++ return the_obj;
++}
++
++static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
++ int number, u32 mode)
++{
++
++ struct yaffs_obj *obj =
++ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
++
++ if (!obj)
++ return NULL;
++
++ obj->fake = 1; /* it is fake so it might not use NAND */
++ obj->rename_allowed = 0;
++ obj->unlink_allowed = 0;
++ obj->deleted = 0;
++ obj->unlinked = 0;
++ obj->yst_mode = mode;
++ obj->my_dev = dev;
++ obj->hdr_chunk = 0; /* Not a valid chunk. */
++ return obj;
++
++}
++
++
++static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
++{
++ int i;
++
++ dev->n_obj = 0;
++ dev->n_tnodes = 0;
++ yaffs_init_raw_tnodes_and_objs(dev);
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
++ dev->obj_bucket[i].count = 0;
++ }
++}
++
++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
++ int number,
++ enum yaffs_obj_type type)
++{
++ struct yaffs_obj *the_obj = NULL;
++
++ if (number > 0)
++ the_obj = yaffs_find_by_number(dev, number);
++
++ if (!the_obj)
++ the_obj = yaffs_new_obj(dev, number, type);
++
++ return the_obj;
++
++}
++
++YCHAR *yaffs_clone_str(const YCHAR *str)
++{
++ YCHAR *new_str = NULL;
++ int len;
++
++ if (!str)
++ str = _Y("");
++
++ len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
++ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
++ if (new_str) {
++ strncpy(new_str, str, len);
++ new_str[len] = 0;
++ }
++ return new_str;
++
++}
++/*
++ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
++ * new link (ie. name) is created or deleted in the directory.
++ *
++ * ie.
++ * create dir/a : update dir's mtime/ctime
++ * rm dir/a: update dir's mtime/ctime
++ * modify dir/a: don't update dir's mtime/ctime
++ *
++ * This can be handled immediately or deferred. Deferring helps reduce the
++ * number of updates when many files in a directory are changed within a
++ * brief period.
++ *
++ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
++ * called periodically.
++ */
++
++static void yaffs_update_parent(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev;
++
++ if (!obj)
++ return;
++ dev = obj->my_dev;
++ obj->dirty = 1;
++ yaffs_load_current_time(obj, 0, 1);
++ if (dev->param.defered_dir_update) {
++ struct list_head *link = &obj->variant.dir_variant.dirty;
++
++ if (list_empty(link)) {
++ list_add(link, &dev->dirty_dirs);
++ yaffs_trace(YAFFS_TRACE_BACKGROUND,
++ "Added object %d to dirty directories",
++ obj->obj_id);
++ }
++
++ } else {
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++ }
++}
++
++void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
++{
++ struct list_head *link;
++ struct yaffs_obj *obj;
++ struct yaffs_dir_var *d_s;
++ union yaffs_obj_var *o_v;
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
++
++ while (!list_empty(&dev->dirty_dirs)) {
++ link = dev->dirty_dirs.next;
++ list_del_init(link);
++
++ d_s = list_entry(link, struct yaffs_dir_var, dirty);
++ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
++ obj = list_entry(o_v, struct yaffs_obj, variant);
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
++ obj->obj_id);
++
++ if (obj->dirty)
++ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
++ }
++}
++
++/*
++ * Mknod (create) a new object.
++ * equiv_obj only has meaning for a hard link;
++ * alias_str only has meaning for a symlink.
++ * rdev only has meaning for devices (a subset of special objects)
++ */
++
++static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
++ struct yaffs_obj *parent,
++ const YCHAR *name,
++ u32 mode,
++ u32 uid,
++ u32 gid,
++ struct yaffs_obj *equiv_obj,
++ const YCHAR *alias_str, u32 rdev)
++{
++ struct yaffs_obj *in;
++ YCHAR *str = NULL;
++ struct yaffs_dev *dev = parent->my_dev;
++
++ /* Check if the entry exists.
++ * If it does then fail the call since we don't want a dup. */
++ if (yaffs_find_by_name(parent, name))
++ return NULL;
++
++ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ str = yaffs_clone_str(alias_str);
++ if (!str)
++ return NULL;
++ }
++
++ in = yaffs_new_obj(dev, -1, type);
++
++ if (!in) {
++ kfree(str);
++ return NULL;
++ }
++
++ in->hdr_chunk = 0;
++ in->valid = 1;
++ in->variant_type = type;
++
++ in->yst_mode = mode;
++
++ yaffs_attribs_init(in, gid, uid, rdev);
++
++ in->n_data_chunks = 0;
++
++ yaffs_set_obj_name(in, name);
++ in->dirty = 1;
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ in->my_dev = parent->my_dev;
++
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.alias = str;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ in->variant.hardlink_variant.equiv_obj = equiv_obj;
++ in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
++ list_add(&in->hard_links, &equiv_obj->hard_links);
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* do nothing */
++ break;
++ }
++
++ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
++ /* Could not create the object header, fail */
++ yaffs_del_obj(in);
++ in = NULL;
++ }
++
++ if (in)
++ yaffs_update_parent(parent);
++
++ return in;
++}
++
++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
++ uid, gid, NULL, NULL, 0);
++}
++
++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
++ u32 mode, u32 uid, u32 gid)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
++ mode, uid, gid, NULL, NULL, 0);
++}
++
++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, u32 rdev)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
++ uid, gid, NULL, NULL, rdev);
++}
++
++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, const YCHAR *alias)
++{
++ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
++ uid, gid, NULL, alias, 0);
++}
++
++/* yaffs_link_obj returns the object id of the equivalent object.*/
++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
++ struct yaffs_obj *equiv_obj)
++{
++ /* Get the real object in case we were fed a hard link obj */
++ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
++
++ if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
++ parent, name, 0, 0, 0,
++ equiv_obj, NULL, 0))
++ return equiv_obj;
++
++ return NULL;
++
++}
++
++
++
++/*---------------------- Block Management and Page Allocation -------------*/
++
++static void yaffs_deinit_blocks(struct yaffs_dev *dev)
++{
++ if (dev->block_info_alt && dev->block_info)
++ vfree(dev->block_info);
++ else
++ kfree(dev->block_info);
++
++ dev->block_info_alt = 0;
++
++ dev->block_info = NULL;
++
++ if (dev->chunk_bits_alt && dev->chunk_bits)
++ vfree(dev->chunk_bits);
++ else
++ kfree(dev->chunk_bits);
++ dev->chunk_bits_alt = 0;
++ dev->chunk_bits = NULL;
++}
++
++static int yaffs_init_blocks(struct yaffs_dev *dev)
++{
++ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++
++ dev->block_info = NULL;
++ dev->chunk_bits = NULL;
++ dev->alloc_block = -1; /* force it to get a new one */
++
++	/* If the first allocation strategy fails, try the alternate one */
++ dev->block_info =
++ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
++ if (!dev->block_info) {
++ dev->block_info =
++ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
++ dev->block_info_alt = 1;
++ } else {
++ dev->block_info_alt = 0;
++ }
++
++ if (!dev->block_info)
++ goto alloc_error;
++
++ /* Set up dynamic blockinfo stuff. Round up bytes. */
++ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
++ dev->chunk_bits =
++ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
++ if (!dev->chunk_bits) {
++ dev->chunk_bits =
++ vmalloc(dev->chunk_bit_stride * n_blocks);
++ dev->chunk_bits_alt = 1;
++ } else {
++ dev->chunk_bits_alt = 0;
++ }
++ if (!dev->chunk_bits)
++ goto alloc_error;
++
++
++ memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
++ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
++ return YAFFS_OK;
++
++alloc_error:
++ yaffs_deinit_blocks(dev);
++ return YAFFS_FAIL;
++}
++
++
++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
++{
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
++ int erased_ok = 0;
++ int i;
++
++ /* If the block is still healthy erase it and mark as clean.
++ * If the block has had a data failure, then retire it.
++ */
++
++ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
++ "yaffs_block_became_dirty block %d state %d %s",
++ block_no, bi->block_state,
++ (bi->needs_retiring) ? "needs retiring" : "");
++
++ yaffs2_clear_oldest_dirty_seq(dev, bi);
++
++ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
++
++ /* If this is the block being garbage collected then stop gc'ing */
++ if (block_no == dev->gc_block)
++ dev->gc_block = 0;
++
++ /* If this block is currently the best candidate for gc
++ * then drop as a candidate */
++ if (block_no == dev->gc_dirtiest) {
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ }
++
++ if (!bi->needs_retiring) {
++ yaffs2_checkpt_invalidate(dev);
++ erased_ok = yaffs_erase_block(dev, block_no);
++ if (!erased_ok) {
++ dev->n_erase_failures++;
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>> Erasure failed %d", block_no);
++ }
++ }
++
++ /* Verify erasure if needed */
++ if (erased_ok &&
++ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
++ !yaffs_skip_verification(dev))) {
++ for (i = 0; i < dev->param.chunks_per_block; i++) {
++ if (!yaffs_check_chunk_erased(dev,
++ block_no * dev->param.chunks_per_block + i)) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ ">>Block %d erasure supposedly OK, but chunk %d not erased",
++ block_no, i);
++ }
++ }
++ }
++
++ if (!erased_ok) {
++ /* We lost a block of free space */
++ dev->n_free_chunks -= dev->param.chunks_per_block;
++ yaffs_retire_block(dev, block_no);
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>> Block %d retired", block_no);
++ return;
++ }
++
++ /* Clean it up... */
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ bi->seq_number = 0;
++ dev->n_erased_blocks++;
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++ bi->has_shrink_hdr = 0;
++ bi->skip_erased_check = 1; /* Clean, so no need to check */
++ bi->gc_prioritise = 0;
++ bi->has_summary = 0;
++
++ yaffs_clear_chunk_bits(dev, block_no);
++
++ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
++}
++
++static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi,
++ int old_chunk, u8 *buffer)
++{
++ int new_chunk;
++ int mark_flash = 1;
++ struct yaffs_ext_tags tags;
++ struct yaffs_obj *object;
++ int matching_chunk;
++ int ret_val = YAFFS_OK;
++
++ memset(&tags, 0, sizeof(tags));
++ yaffs_rd_chunk_tags_nand(dev, old_chunk,
++ buffer, &tags);
++ object = yaffs_find_by_number(dev, tags.obj_id);
++
++ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
++ "Collecting chunk in block %d, %d %d %d ",
++ dev->gc_chunk, tags.obj_id,
++ tags.chunk_id, tags.n_bytes);
++
++ if (object && !yaffs_skip_verification(dev)) {
++ if (tags.chunk_id == 0)
++ matching_chunk =
++ object->hdr_chunk;
++ else if (object->soft_del)
++ /* Defeat the test */
++ matching_chunk = old_chunk;
++ else
++ matching_chunk =
++ yaffs_find_chunk_in_file
++ (object, tags.chunk_id,
++ NULL);
++
++ if (old_chunk != matching_chunk)
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "gc: page in gc mismatch: %d %d %d %d",
++ old_chunk,
++ matching_chunk,
++ tags.obj_id,
++ tags.chunk_id);
++ }
++
++ if (!object) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "page %d in gc has no object: %d %d %d ",
++ old_chunk,
++ tags.obj_id, tags.chunk_id,
++ tags.n_bytes);
++ }
++
++ if (object &&
++ object->deleted &&
++ object->soft_del && tags.chunk_id != 0) {
++			/* Data chunk in a soft-deleted file: throw it away.
++			 * There is no need to copy it; just forget about it
++			 * and fix up the object.
++			 */
++
++			/* Free chunks already includes
++			 * soft-deleted chunks; however, this
++			 * chunk is about to be really
++			 * deleted, which will increment free
++			 * chunks. We have to decrement free
++			 * chunks so this works out properly.
++			 */
++ dev->n_free_chunks--;
++ bi->soft_del_pages--;
++
++ object->n_data_chunks--;
++ if (object->n_data_chunks <= 0) {
++			/* remember to clean up this object */
++ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
++ dev->n_clean_ups++;
++ }
++ mark_flash = 0;
++ } else if (object) {
++ /* It's either a data chunk in a live
++ * file or an ObjectHeader, so we're
++ * interested in it.
++ * NB Need to keep the ObjectHeaders of
++ * deleted files until the whole file
++ * has been deleted off
++ */
++ tags.serial_number++;
++ dev->n_gc_copies++;
++
++ if (tags.chunk_id == 0) {
++ /* It is an object Id,
++ * We need to nuke the
++ * shrinkheader flags since its
++ * work is done.
++ * Also need to clean up
++ * shadowing.
++ */
++ struct yaffs_obj_hdr *oh;
++ oh = (struct yaffs_obj_hdr *) buffer;
++
++ oh->is_shrink = 0;
++ tags.extra_is_shrink = 0;
++ oh->shadows_obj = 0;
++ oh->inband_shadowed_obj_id = 0;
++ tags.extra_shadows = 0;
++
++ /* Update file size */
++ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
++ yaffs_oh_size_load(oh,
++ object->variant.file_variant.file_size);
++ tags.extra_file_size =
++ object->variant.file_variant.file_size;
++ }
++
++ yaffs_verify_oh(object, oh, &tags, 1);
++ new_chunk =
++ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
++ } else {
++ new_chunk =
++ yaffs_write_new_chunk(dev, buffer, &tags, 1);
++ }
++
++ if (new_chunk < 0) {
++ ret_val = YAFFS_FAIL;
++ } else {
++
++ /* Now fix up the Tnodes etc. */
++
++ if (tags.chunk_id == 0) {
++ /* It's a header */
++ object->hdr_chunk = new_chunk;
++ object->serial = tags.serial_number;
++ } else {
++ /* It's a data chunk */
++ yaffs_put_chunk_in_file(object, tags.chunk_id,
++ new_chunk, 0);
++ }
++ }
++ }
++ if (ret_val == YAFFS_OK)
++ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
++ return ret_val;
++}
++
++static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
++{
++ int old_chunk;
++ int ret_val = YAFFS_OK;
++ int i;
++ int is_checkpt_block;
++ int max_copies;
++ int chunks_before = yaffs_get_erased_chunks(dev);
++ int chunks_after;
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
++
++ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
++
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "Collecting block %d, in use %d, shrink %d, whole_block %d",
++ block, bi->pages_in_use, bi->has_shrink_hdr,
++ whole_block);
++
++ /*yaffs_verify_free_chunks(dev); */
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
++ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
++
++ bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
++
++ dev->gc_disable = 1;
++
++ yaffs_summary_gc(dev, block);
++
++ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "Collecting block %d that has no chunks in use",
++ block);
++ yaffs_block_became_dirty(dev, block);
++ } else {
++
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++
++ yaffs_verify_blk(dev, bi, block);
++
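++		/* Limit how many live chunks are copied per call: the whole
++		 * block when whole_block is set, otherwise only a few so the
++		 * collection work is spread across several calls.
++		 */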
++ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
++ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
++
++ for (/* init already done */ ;
++ ret_val == YAFFS_OK &&
++ dev->gc_chunk < dev->param.chunks_per_block &&
++ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
++ max_copies > 0;
++ dev->gc_chunk++, old_chunk++) {
++ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
++ /* Page is in use and might need to be copied */
++ max_copies--;
++ ret_val = yaffs_gc_process_chunk(dev, bi,
++ old_chunk, buffer);
++ }
++ }
++ yaffs_release_temp_buffer(dev, buffer);
++ }
++
++ yaffs_verify_collected_blk(dev, bi, block);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ /*
++ * The gc did not complete. Set block state back to FULL
++ * because checkpointing does not restore gc.
++ */
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ } else {
++ /* The gc completed. */
++ /* Do any required cleanups */
++ for (i = 0; i < dev->n_clean_ups; i++) {
++ /* Time to delete the file too */
++ struct yaffs_obj *object =
++ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
++ if (object) {
++ yaffs_free_tnode(dev,
++ object->variant.file_variant.top);
++ object->variant.file_variant.top = NULL;
++ yaffs_trace(YAFFS_TRACE_GC,
++ "yaffs: About to finally delete object %d",
++ object->obj_id);
++ yaffs_generic_obj_del(object);
++ object->my_dev->n_deleted_files--;
++ }
++
++ }
++ chunks_after = yaffs_get_erased_chunks(dev);
++ if (chunks_before >= chunks_after)
++ yaffs_trace(YAFFS_TRACE_GC,
++ "gc did not increase free chunks before %d after %d",
++ chunks_before, chunks_after);
++ dev->gc_block = 0;
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
++ }
++
++ dev->gc_disable = 0;
++
++ return ret_val;
++}
++
++/*
++ * find_gc_block() selects the dirtiest block (or close enough)
++ * for garbage collection.
++ */
++
++static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
++ int aggressive, int background)
++{
++ int i;
++ int iterations;
++ unsigned selected = 0;
++ int prioritised = 0;
++ int prioritised_exist = 0;
++ struct yaffs_block_info *bi;
++ int threshold;
++
++ /* First let's see if we need to grab a prioritised block */
++ if (dev->has_pending_prioritised_gc && !aggressive) {
++ dev->gc_dirtiest = 0;
++ bi = dev->block_info;
++ for (i = dev->internal_start_block;
++ i <= dev->internal_end_block && !selected; i++) {
++
++ if (bi->gc_prioritise) {
++ prioritised_exist = 1;
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ selected = i;
++ prioritised = 1;
++ }
++ }
++ bi++;
++ }
++
++ /*
++ * If there is a prioritised block and none was selected then
++ * this happened because there is at least one old dirty block
++ * gumming up the works. Let's gc the oldest dirty block.
++ */
++
++ if (prioritised_exist &&
++ !selected && dev->oldest_dirty_block > 0)
++ selected = dev->oldest_dirty_block;
++
++ if (!prioritised_exist) /* None found, so we can clear this */
++ dev->has_pending_prioritised_gc = 0;
++ }
++
++ /* If we're doing aggressive GC then we are happy to take a less-dirty
++ * block, and search harder.
++	 * Otherwise (leisurely gc) we only bother to do this if the
++	 * block has only a few pages in use.
++ */
++
++ if (!selected) {
++ int pages_used;
++ int n_blocks =
++ dev->internal_end_block - dev->internal_start_block + 1;
++ if (aggressive) {
++ threshold = dev->param.chunks_per_block;
++ iterations = n_blocks;
++ } else {
++ int max_threshold;
++
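++			/* Passive gc: for background collection the threshold
++			 * grows with gc_not_done, and in all cases it is
++			 * clamped between YAFFS_GC_PASSIVE_THRESHOLD and a
++			 * fraction of chunks_per_block.
++			 */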
++ if (background)
++ max_threshold = dev->param.chunks_per_block / 2;
++ else
++ max_threshold = dev->param.chunks_per_block / 8;
++
++ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
++ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++
++ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
++ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
++ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
++ if (threshold > max_threshold)
++ threshold = max_threshold;
++
++ iterations = n_blocks / 16 + 1;
++ if (iterations > 100)
++ iterations = 100;
++ }
++
++ for (i = 0;
++ i < iterations &&
++ (dev->gc_dirtiest < 1 ||
++ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
++ i++) {
++ dev->gc_block_finder++;
++ if (dev->gc_block_finder < dev->internal_start_block ||
++ dev->gc_block_finder > dev->internal_end_block)
++ dev->gc_block_finder =
++ dev->internal_start_block;
++
++ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
++
++ pages_used = bi->pages_in_use - bi->soft_del_pages;
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
++ pages_used < dev->param.chunks_per_block &&
++ (dev->gc_dirtiest < 1 ||
++ pages_used < dev->gc_pages_in_use) &&
++ yaffs_block_ok_for_gc(dev, bi)) {
++ dev->gc_dirtiest = dev->gc_block_finder;
++ dev->gc_pages_in_use = pages_used;
++ }
++ }
++
++ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
++ selected = dev->gc_dirtiest;
++ }
++
++ /*
++ * If nothing has been selected for a while, try the oldest dirty
++ * because that's gumming up the works.
++ */
++
++ if (!selected && dev->param.is_yaffs2 &&
++ dev->gc_not_done >= (background ? 10 : 20)) {
++ yaffs2_find_oldest_dirty_seq(dev);
++ if (dev->oldest_dirty_block > 0) {
++ selected = dev->oldest_dirty_block;
++ dev->gc_dirtiest = selected;
++ dev->oldest_dirty_gc_count++;
++ bi = yaffs_get_block_info(dev, selected);
++ dev->gc_pages_in_use =
++ bi->pages_in_use - bi->soft_del_pages;
++ } else {
++ dev->gc_not_done = 0;
++ }
++ }
++
++ if (selected) {
++ yaffs_trace(YAFFS_TRACE_GC,
++ "GC Selected block %d with %d free, prioritised:%d",
++ selected,
++ dev->param.chunks_per_block - dev->gc_pages_in_use,
++ prioritised);
++
++ dev->n_gc_blocks++;
++ if (background)
++ dev->bg_gcs++;
++
++ dev->gc_dirtiest = 0;
++ dev->gc_pages_in_use = 0;
++ dev->gc_not_done = 0;
++ if (dev->refresh_skip > 0)
++ dev->refresh_skip--;
++ } else {
++ dev->gc_not_done++;
++ yaffs_trace(YAFFS_TRACE_GC,
++ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
++ dev->gc_block_finder, dev->gc_not_done, threshold,
++ dev->gc_dirtiest, dev->gc_pages_in_use,
++ dev->oldest_dirty_block, background ? " bg" : "");
++ }
++
++ return selected;
++}
++
++/* New garbage collector
++ * If we're very low on erased blocks then we do aggressive garbage collection,
++ * otherwise we do "leisurely" garbage collection.
++ * Aggressive gc looks further (the whole array) and will accept less-dirty blocks.
++ * Passive gc only inspects smaller areas and only accepts dirtier blocks.
++ *
++ * The idea is to help clear out space in a more spread-out manner.
++ * Dunno if it really does anything useful.
++ */
++static int yaffs_check_gc(struct yaffs_dev *dev, int background)
++{
++ int aggressive = 0;
++ int gc_ok = YAFFS_OK;
++ int max_tries = 0;
++ int min_erased;
++ int erased_chunks;
++ int checkpt_block_adjust;
++
++ if (dev->param.gc_control_fn &&
++ (dev->param.gc_control_fn(dev) & 1) == 0)
++ return YAFFS_OK;
++
++ if (dev->gc_disable)
++ /* Bail out so we don't get recursive gc */
++ return YAFFS_OK;
++
++ /* This loop should pass the first time.
++ * Only loops here if the collection does not increase space.
++ */
++
++ do {
++ max_tries++;
++
++ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
++
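++		/* Keep at least the reserved blocks plus whatever the
++		 * checkpoint needs erased; falling below that forces
++		 * aggressive collection.
++		 */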
++ min_erased =
++ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
++ erased_chunks =
++ dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ /* If we need a block soon then do aggressive gc. */
++ if (dev->n_erased_blocks < min_erased)
++ aggressive = 1;
++ else {
++ if (!background
++ && erased_chunks > (dev->n_free_chunks / 4))
++ break;
++
++ if (dev->gc_skip > 20)
++ dev->gc_skip = 20;
++ if (erased_chunks < dev->n_free_chunks / 2 ||
++ dev->gc_skip < 1 || background)
++ aggressive = 0;
++ else {
++ dev->gc_skip--;
++ break;
++ }
++ }
++
++ dev->gc_skip = 5;
++
++ /* If we don't already have a block being gc'd then see if we
++ * should start another */
++
++ if (dev->gc_block < 1 && !aggressive) {
++ dev->gc_block = yaffs2_find_refresh_block(dev);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
++ }
++ if (dev->gc_block < 1) {
++ dev->gc_block =
++ yaffs_find_gc_block(dev, aggressive, background);
++ dev->gc_chunk = 0;
++ dev->n_clean_ups = 0;
++ }
++
++ if (dev->gc_block > 0) {
++ dev->all_gcs++;
++ if (!aggressive)
++ dev->passive_gc_count++;
++
++ yaffs_trace(YAFFS_TRACE_GC,
++ "yaffs: GC n_erased_blocks %d aggressive %d",
++ dev->n_erased_blocks, aggressive);
++
++ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
++ }
++
++ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
++ dev->gc_block > 0) {
++ yaffs_trace(YAFFS_TRACE_GC,
++ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
++ dev->n_erased_blocks, max_tries,
++ dev->gc_block);
++ }
++ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
++ (dev->gc_block > 0) && (max_tries < 2));
++
++ return aggressive ? gc_ok : YAFFS_OK;
++}
++
++/*
++ * yaffs_bg_gc()
++ * Garbage collects. Intended to be called from a background thread.
++ * Returns non-zero if at least half the free chunks are erased.
++ */
++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
++{
++ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
++
++ yaffs_check_gc(dev, 1);
++ return erased_chunks > dev->n_free_chunks / 2;
++}
++
++/*-------------------- Data file manipulation -----------------*/
++
++static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
++{
++ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
++
++ if (nand_chunk >= 0)
++ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
++ buffer, NULL);
++ else {
++ yaffs_trace(YAFFS_TRACE_NANDACCESS,
++ "Chunk %d not found zero instead",
++ nand_chunk);
++ /* get sane (zero) data if you read a hole */
++ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
++ return 0;
++ }
++
++}
++
++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
++ int lyn)
++{
++ int block;
++ int page;
++ struct yaffs_ext_tags tags;
++ struct yaffs_block_info *bi;
++
++ if (chunk_id <= 0)
++ return;
++
++ dev->n_deletions++;
++ block = chunk_id / dev->param.chunks_per_block;
++ page = chunk_id % dev->param.chunks_per_block;
++
++ if (!yaffs_check_chunk_bit(dev, block, page))
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Deleting invalid chunk %d", chunk_id);
++
++ bi = yaffs_get_block_info(dev, block);
++
++ yaffs2_update_oldest_dirty_seq(dev, block, bi);
++
++ yaffs_trace(YAFFS_TRACE_DELETION,
++ "line %d delete of chunk %d",
++ lyn, chunk_id);
++
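++	/* YAFFS1 (when mark_flash is set) records the deletion on flash by
++	 * rewriting the chunk's tags with is_deleted set; YAFFS2 never
++	 * rewrites a chunk, so the deletion is only tracked in RAM and
++	 * counted as unmarked.
++	 */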
++ if (!dev->param.is_yaffs2 && mark_flash &&
++ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
++
++ memset(&tags, 0, sizeof(tags));
++ tags.is_deleted = 1;
++ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
++ yaffs_handle_chunk_update(dev, chunk_id, &tags);
++ } else {
++ dev->n_unmarked_deletions++;
++ }
++
++ /* Pull out of the management area.
++ * If the whole block became dirty, this will kick off an erasure.
++ */
++ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
++ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
++ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
++ dev->n_free_chunks++;
++ yaffs_clear_chunk_bit(dev, block, page);
++ bi->pages_in_use--;
++
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
++ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ yaffs_block_became_dirty(dev, block);
++ }
++ }
++}
++
++static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
++ const u8 *buffer, int n_bytes, int use_reserve)
++{
++	/* Find the old chunk. We need to do this to get the serial number.
++	 * Write the new chunk and patch it into the tree.
++	 * Invalidate the old tags.
++	 */
++
++ int prev_chunk_id;
++ struct yaffs_ext_tags prev_tags;
++ int new_chunk_id;
++ struct yaffs_ext_tags new_tags;
++ struct yaffs_dev *dev = in->my_dev;
++
++ yaffs_check_gc(dev, 0);
++
++ /* Get the previous chunk at this location in the file if it exists.
++ * If it does not exist then put a zero into the tree. This creates
++ * the tnode now, rather than later when it is harder to clean up.
++ */
++ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
++ if (prev_chunk_id < 1 &&
++ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
++ return 0;
++
++ /* Set up new tags */
++ memset(&new_tags, 0, sizeof(new_tags));
++
++ new_tags.chunk_id = inode_chunk;
++ new_tags.obj_id = in->obj_id;
++ new_tags.serial_number =
++ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
++ new_tags.n_bytes = n_bytes;
++
++ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Writing %d bytes to chunk!!!!!!!!!",
++ n_bytes);
++ BUG();
++ }
++
++ new_chunk_id =
++ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
++
++ if (new_chunk_id > 0) {
++ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
++
++ if (prev_chunk_id > 0)
++ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
++
++ yaffs_verify_file_sane(in);
++ }
++ return new_chunk_id;
++
++}
++
++
++
++static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
++ const YCHAR *name, const void *value, int size,
++ int flags)
++{
++ struct yaffs_xattr_mod xmod;
++ int result;
++
++ xmod.set = set;
++ xmod.name = name;
++ xmod.data = value;
++ xmod.size = size;
++ xmod.flags = flags;
++ xmod.result = -ENOSPC;
++
++ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
++
++ if (result > 0)
++ return xmod.result;
++ else
++ return -ENOSPC;
++}
++
++static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
++ struct yaffs_xattr_mod *xmod)
++{
++ int retval = 0;
++ int x_offs = sizeof(struct yaffs_obj_hdr);
++ struct yaffs_dev *dev = obj->my_dev;
++ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
++ char *x_buffer = buffer + x_offs;
++
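++	/* Extended attributes live in the object header chunk, in the space
++	 * that follows struct yaffs_obj_hdr, as name/value pairs managed by
++	 * the nval_* helpers.
++	 */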
++ if (xmod->set)
++ retval =
++ nval_set(x_buffer, x_size, xmod->name, xmod->data,
++ xmod->size, xmod->flags);
++ else
++ retval = nval_del(x_buffer, x_size, xmod->name);
++
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++ xmod->result = retval;
++
++ return retval;
++}
++
++static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
++ void *value, int size)
++{
++ char *buffer = NULL;
++ int result;
++ struct yaffs_ext_tags tags;
++ struct yaffs_dev *dev = obj->my_dev;
++ int x_offs = sizeof(struct yaffs_obj_hdr);
++ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
++ char *x_buffer;
++ int retval = 0;
++
++ if (obj->hdr_chunk < 1)
++ return -ENODATA;
++
++ /* If we know that the object has no xattribs then don't do all the
++ * reading and parsing.
++ */
++ if (obj->xattr_known && !obj->has_xattr) {
++ if (name)
++ return -ENODATA;
++ else
++ return 0;
++ }
++
++ buffer = (char *)yaffs_get_temp_buffer(dev);
++ if (!buffer)
++ return -ENOMEM;
++
++ result =
++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
++
++ if (result != YAFFS_OK)
++ retval = -ENOENT;
++ else {
++ x_buffer = buffer + x_offs;
++
++ if (!obj->xattr_known) {
++ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
++ obj->xattr_known = 1;
++ }
++
++ if (name)
++ retval = nval_get(x_buffer, x_size, name, value, size);
++ else
++ retval = nval_list(x_buffer, x_size, value, size);
++ }
++ yaffs_release_temp_buffer(dev, (u8 *) buffer);
++ return retval;
++}
++
++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
++ const void *value, int size, int flags)
++{
++ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
++}
++
++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
++{
++ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
++}
++
++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
++ int size)
++{
++ return yaffs_do_xattrib_fetch(obj, name, value, size);
++}
++
++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
++{
++ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
++}
++
++static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
++{
++ u8 *buf;
++ struct yaffs_obj_hdr *oh;
++ struct yaffs_dev *dev;
++ struct yaffs_ext_tags tags;
++ int result;
++ int alloc_failed = 0;
++
++ if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
++ return;
++
++ dev = in->my_dev;
++ in->lazy_loaded = 0;
++ buf = yaffs_get_temp_buffer(dev);
++
++ result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
++ oh = (struct yaffs_obj_hdr *)buf;
++
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ yaffs_set_obj_name_from_oh(in, oh);
++
++ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ in->variant.symlink_variant.alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.symlink_variant.alias)
++ alloc_failed = 1; /* Not returned */
++ }
++ yaffs_release_temp_buffer(dev, buf);
++}
++
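++/* With CONFIG_YAFFS_AUTO_UNICODE, the name stored in the object header
++ * uses its first character as a flag: a non-zero first byte means the
++ * stored name is plain ASCII, while a leading zero means a unicode name
++ * follows from oh_name[1]. The two helpers below convert between that
++ * stored form and the in-memory name.
++ */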
++static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
++ const YCHAR *oh_name, int buff_size)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ if (dev->param.auto_unicode) {
++ if (*oh_name) {
++ /* It is an ASCII name, do an ASCII to
++ * unicode conversion */
++ const char *ascii_oh_name = (const char *)oh_name;
++ int n = buff_size - 1;
++ while (n > 0 && *ascii_oh_name) {
++ *name = *ascii_oh_name;
++ name++;
++ ascii_oh_name++;
++ n--;
++ }
++ } else {
++ strncpy(name, oh_name + 1, buff_size - 1);
++ }
++ } else {
++#else
++ (void) dev;
++ {
++#endif
++ strncpy(name, oh_name, buff_size - 1);
++ }
++}
++
++static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
++ const YCHAR *name)
++{
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++
++ int is_ascii;
++ YCHAR *w;
++
++ if (dev->param.auto_unicode) {
++
++ is_ascii = 1;
++ w = name;
++
++ /* Figure out if the name will fit in ascii character set */
++ while (is_ascii && *w) {
++ if ((*w) & 0xff00)
++ is_ascii = 0;
++ w++;
++ }
++
++ if (is_ascii) {
++ /* It is an ASCII name, so convert unicode to ascii */
++ char *ascii_oh_name = (char *)oh_name;
++ int n = YAFFS_MAX_NAME_LENGTH - 1;
++ while (n > 0 && *name) {
++ *ascii_oh_name = *name;
++ name++;
++ ascii_oh_name++;
++ n--;
++ }
++ } else {
++ /* Unicode name, so save starting at the second YCHAR */
++ *oh_name = 0;
++ strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
++ }
++ } else {
++#else
++	(void) dev;
++ {
++#endif
++ strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
++ }
++}
++
++/* UpdateObjectHeader updates the header on NAND for an object.
++ * If name is not NULL, then that new name is used.
++ */
++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
++ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
++{
++
++ struct yaffs_block_info *bi;
++ struct yaffs_dev *dev = in->my_dev;
++ int prev_chunk_id;
++ int ret_val = 0;
++ int result = 0;
++ int new_chunk_id;
++ struct yaffs_ext_tags new_tags;
++ struct yaffs_ext_tags old_tags;
++ const YCHAR *alias = NULL;
++ u8 *buffer = NULL;
++ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
++ struct yaffs_obj_hdr *oh = NULL;
++ loff_t file_size = 0;
++
++ strcpy(old_name, _Y("silly old name"));
++
++ if (in->fake && in != dev->root_dir && !force && !xmod)
++ return ret_val;
++
++ yaffs_check_gc(dev, 0);
++ yaffs_check_obj_details_loaded(in);
++
++ buffer = yaffs_get_temp_buffer(in->my_dev);
++ oh = (struct yaffs_obj_hdr *)buffer;
++
++ prev_chunk_id = in->hdr_chunk;
++
++ if (prev_chunk_id > 0) {
++ result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
++ buffer, &old_tags);
++
++ yaffs_verify_oh(in, oh, &old_tags, 0);
++ memcpy(old_name, oh->name, sizeof(oh->name));
++ memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
++ } else {
++ memset(buffer, 0xff, dev->data_bytes_per_chunk);
++ }
++
++ oh->type = in->variant_type;
++ oh->yst_mode = in->yst_mode;
++ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
++
++ yaffs_load_attribs_oh(oh, in);
++
++ if (in->parent)
++ oh->parent_obj_id = in->parent->obj_id;
++ else
++ oh->parent_obj_id = 0;
++
++ if (name && *name) {
++ memset(oh->name, 0, sizeof(oh->name));
++ yaffs_load_oh_from_name(dev, oh->name, name);
++ } else if (prev_chunk_id > 0) {
++ memcpy(oh->name, old_name, sizeof(oh->name));
++ } else {
++ memset(oh->name, 0, sizeof(oh->name));
++ }
++
++ oh->is_shrink = is_shrink;
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Should not happen */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
++ oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
++ file_size = in->variant.file_variant.file_size;
++ yaffs_oh_size_load(oh, file_size);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ alias = in->variant.symlink_variant.alias;
++ if (!alias)
++ alias = _Y("no alias");
++ strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
++ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
++ break;
++ }
++
++ /* process any xattrib modifications */
++ if (xmod)
++ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
++
++ /* Tags */
++ memset(&new_tags, 0, sizeof(new_tags));
++ in->serial++;
++ new_tags.chunk_id = 0;
++ new_tags.obj_id = in->obj_id;
++ new_tags.serial_number = in->serial;
++
++ /* Add extra info for file header */
++ new_tags.extra_available = 1;
++ new_tags.extra_parent_id = oh->parent_obj_id;
++ new_tags.extra_file_size = file_size;
++ new_tags.extra_is_shrink = oh->is_shrink;
++ new_tags.extra_equiv_id = oh->equiv_id;
++ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
++ new_tags.extra_obj_type = in->variant_type;
++ yaffs_verify_oh(in, oh, &new_tags, 1);
++
++ /* Create new chunk in NAND */
++ new_chunk_id =
++ yaffs_write_new_chunk(dev, buffer, &new_tags,
++ (prev_chunk_id > 0) ? 1 : 0);
++
++ if (buffer)
++ yaffs_release_temp_buffer(dev, buffer);
++
++ if (new_chunk_id < 0)
++ return new_chunk_id;
++
++ in->hdr_chunk = new_chunk_id;
++
++ if (prev_chunk_id > 0)
++ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
++
++ if (!yaffs_obj_cache_dirty(in))
++ in->dirty = 0;
++
++ /* If this was a shrink, then mark the block
++ * that the chunk lives on */
++ if (is_shrink) {
++ bi = yaffs_get_block_info(in->my_dev,
++ new_chunk_id /
++ in->my_dev->param.chunks_per_block);
++ bi->has_shrink_hdr = 1;
++ }
++
++
++ return new_chunk_id;
++}
++
++/*--------------------- File read/write ------------------------
++ * Read and write have very similar structures.
++ * In general the read/write has three parts to it:
++ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
++ * Some complete chunks
++ * An incomplete chunk to end off with
++ *
++ * Curve-balls: the first chunk might also be the last chunk.
++ */
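++/* For example (illustrative only, assuming 2048-byte data chunks):
++ * a 5000-byte read starting at offset 1000 is performed as 1048 bytes from
++ * chunk 1, 2048 bytes from chunk 2 and 1904 bytes from chunk 3
++ * (chunk numbers here are 1-based file chunk ids).
++ */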
++
++int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
++{
++ int chunk;
++ u32 start;
++ int n_copy;
++ int n = n_bytes;
++ int n_done = 0;
++ struct yaffs_cache *cache;
++ struct yaffs_dev *dev;
++
++ dev = in->my_dev;
++
++ while (n > 0) {
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++ chunk++;
++
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
++ if ((start + n) < dev->data_bytes_per_chunk)
++ n_copy = n;
++ else
++ n_copy = dev->data_bytes_per_chunk - start;
++
++ cache = yaffs_find_chunk_cache(in, chunk);
++
++ /* If the chunk is already in the cache or it is less than
++ * a whole chunk or we're using inband tags then use the cache
++ * (if there is caching) else bypass the cache.
++ */
++ if (cache || n_copy != dev->data_bytes_per_chunk ||
++ dev->param.inband_tags) {
++ if (dev->param.n_caches > 0) {
++
++ /* If we can't find the data in the cache,
++ * then load it up. */
++
++ if (!cache) {
++ cache =
++ yaffs_grab_chunk_cache(in->my_dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->data);
++ cache->n_bytes = 0;
++ }
++
++ yaffs_use_cache(dev, cache, 0);
++
++ cache->locked = 1;
++
++ memcpy(buffer, &cache->data[start], n_copy);
++
++ cache->locked = 0;
++ } else {
++ /* Read into the local buffer then copy.. */
++
++ u8 *local_buffer =
++ yaffs_get_temp_buffer(dev);
++ yaffs_rd_data_obj(in, chunk, local_buffer);
++
++ memcpy(buffer, &local_buffer[start], n_copy);
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++ }
++ } else {
++ /* A full chunk. Read directly into the buffer. */
++ yaffs_rd_data_obj(in, chunk, buffer);
++ }
++ n -= n_copy;
++ offset += n_copy;
++ buffer += n_copy;
++ n_done += n_copy;
++ }
++ return n_done;
++}
++
++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++ int n_bytes, int write_through)
++{
++
++ int chunk;
++ u32 start;
++ int n_copy;
++ int n = n_bytes;
++ int n_done = 0;
++ int n_writeback;
++ loff_t start_write = offset;
++ int chunk_written = 0;
++ u32 n_bytes_read;
++ loff_t chunk_start;
++ struct yaffs_dev *dev;
++
++ dev = in->my_dev;
++
++ while (n > 0 && chunk_written >= 0) {
++ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
++
++ if (((loff_t)chunk) *
++ dev->data_bytes_per_chunk + start != offset ||
++ start >= dev->data_bytes_per_chunk) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "AddrToChunk of offset %lld gives chunk %d start %d",
++ offset, chunk, start);
++ }
++ chunk++; /* File pos to chunk in file offset */
++
++ /* OK now check for the curveball where the start and end are in
++ * the same chunk.
++ */
++
++ if ((start + n) < dev->data_bytes_per_chunk) {
++ n_copy = n;
++
++ /* Now calculate how many bytes to write back....
++			 * If we're overwriting and not writing to the end of
++ * file then we need to write back as much as was there
++ * before.
++ */
++
++ chunk_start = (((loff_t)(chunk - 1)) *
++ dev->data_bytes_per_chunk);
++
++ if (chunk_start > in->variant.file_variant.file_size)
++ n_bytes_read = 0; /* Past end of file */
++ else
++ n_bytes_read =
++ in->variant.file_variant.file_size -
++ chunk_start;
++
++ if (n_bytes_read > dev->data_bytes_per_chunk)
++ n_bytes_read = dev->data_bytes_per_chunk;
++
++ n_writeback =
++ (n_bytes_read >
++ (start + n)) ? n_bytes_read : (start + n);
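++			/* e.g. if the chunk already held 1500 bytes of file
++			 * data and we overwrite bytes 100..199 of it,
++			 * n_bytes_read is 1500 and n_writeback stays 1500,
++			 * so the tail of the old data is preserved.
++			 */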
++
++ if (n_writeback < 0 ||
++ n_writeback > dev->data_bytes_per_chunk)
++ BUG();
++
++ } else {
++ n_copy = dev->data_bytes_per_chunk - start;
++ n_writeback = dev->data_bytes_per_chunk;
++ }
++
++ if (n_copy != dev->data_bytes_per_chunk ||
++ !dev->param.cache_bypass_aligned ||
++ dev->param.inband_tags) {
++ /* An incomplete start or end chunk (or maybe both
++ * start and end chunk), or we're using inband tags,
++ * or we're forcing writes through the cache,
++ * so we want to use the cache buffers.
++ */
++ if (dev->param.n_caches > 0) {
++ struct yaffs_cache *cache;
++
++ /* If we can't find the data in the cache, then
++ * load the cache */
++ cache = yaffs_find_chunk_cache(in, chunk);
++
++ if (!cache &&
++ yaffs_check_alloc_available(dev, 1)) {
++ cache = yaffs_grab_chunk_cache(dev);
++ cache->object = in;
++ cache->chunk_id = chunk;
++ cache->dirty = 0;
++ cache->locked = 0;
++ yaffs_rd_data_obj(in, chunk,
++ cache->data);
++ } else if (cache &&
++ !cache->dirty &&
++ !yaffs_check_alloc_available(dev,
++ 1)) {
++ /* Drop the cache if it was a read cache
++ * item and no space check has been made
++ * for it.
++ */
++ cache = NULL;
++ }
++
++ if (cache) {
++ yaffs_use_cache(dev, cache, 1);
++ cache->locked = 1;
++
++ memcpy(&cache->data[start], buffer,
++ n_copy);
++
++ cache->locked = 0;
++ cache->n_bytes = n_writeback;
++
++ if (write_through) {
++ chunk_written =
++ yaffs_wr_data_obj
++ (cache->object,
++ cache->chunk_id,
++ cache->data,
++ cache->n_bytes, 1);
++ cache->dirty = 0;
++ }
++ } else {
++ chunk_written = -1; /* fail write */
++ }
++ } else {
++ /* An incomplete start or end chunk (or maybe
++ * both start and end chunk). Read into the
++ * local buffer then copy over and write back.
++ */
++
++ u8 *local_buffer = yaffs_get_temp_buffer(dev);
++
++ yaffs_rd_data_obj(in, chunk, local_buffer);
++ memcpy(&local_buffer[start], buffer, n_copy);
++
++ chunk_written =
++ yaffs_wr_data_obj(in, chunk,
++ local_buffer,
++ n_writeback, 0);
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++ }
++ } else {
++ /* A full chunk. Write directly from the buffer. */
++
++ chunk_written =
++ yaffs_wr_data_obj(in, chunk, buffer,
++ dev->data_bytes_per_chunk, 0);
++
++ /* Since we've overwritten the cached data,
++ * we better invalidate it. */
++ yaffs_invalidate_chunk_cache(in, chunk);
++ }
++
++ if (chunk_written >= 0) {
++ n -= n_copy;
++ offset += n_copy;
++ buffer += n_copy;
++ n_done += n_copy;
++ }
++ }
++
++ /* Update file object */
++
++ if ((start_write + n_done) > in->variant.file_variant.file_size)
++ in->variant.file_variant.file_size = (start_write + n_done);
++
++ in->dirty = 1;
++ return n_done;
++}
++
++int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++ int n_bytes, int write_through)
++{
++ yaffs2_handle_hole(in, offset);
++ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
++}
++
++/* ---------------------- File resizing stuff ------------------ */
++
++static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
++{
++
++ struct yaffs_dev *dev = in->my_dev;
++ loff_t old_size = in->variant.file_variant.file_size;
++ int i;
++ int chunk_id;
++ u32 dummy;
++ int last_del;
++ int start_del;
++
++ if (old_size > 0)
++ yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
++ else
++ last_del = 0;
++
++ yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
++ &start_del, &dummy);
++ last_del++;
++ start_del++;
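++	/* e.g. with 2048-byte chunks, old_size 5000 and new_size 1000:
++	 * last_del becomes 3 and start_del becomes 2, so chunks 3 and 2 are
++	 * deleted while chunk 1 (which still holds live data) is kept.
++	 */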
++
++ /* Delete backwards so that we don't end up with holes if
++ * power is lost part-way through the operation.
++ */
++ for (i = last_del; i >= start_del; i--) {
++ /* NB this could be optimised somewhat,
++ * eg. could retrieve the tags and write them without
++ * using yaffs_chunk_del
++ */
++
++ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
++
++ if (chunk_id < 1)
++ continue;
++
++ if (chunk_id <
++ (dev->internal_start_block * dev->param.chunks_per_block) ||
++ chunk_id >=
++ ((dev->internal_end_block + 1) *
++ dev->param.chunks_per_block)) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Found daft chunk_id %d for %d",
++ chunk_id, i);
++ } else {
++ in->n_data_chunks--;
++ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
++ }
++ }
++}
++
++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
++{
++ int new_full;
++ u32 new_partial;
++ struct yaffs_dev *dev = obj->my_dev;
++
++ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
++
++ yaffs_prune_chunks(obj, new_size);
++
++ if (new_partial != 0) {
++ int last_chunk = 1 + new_full;
++ u8 *local_buffer = yaffs_get_temp_buffer(dev);
++
++ /* Rewrite the last chunk with its new size and zero pad */
++ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
++ memset(local_buffer + new_partial, 0,
++ dev->data_bytes_per_chunk - new_partial);
++
++ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
++ new_partial, 1);
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++ }
++
++ obj->variant.file_variant.file_size = new_size;
++
++ yaffs_prune_tree(dev, &obj->variant.file_variant);
++}
++
++int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
++{
++ struct yaffs_dev *dev = in->my_dev;
++ loff_t old_size = in->variant.file_variant.file_size;
++
++ yaffs_flush_file_cache(in);
++ yaffs_invalidate_whole_cache(in);
++
++ yaffs_check_gc(dev, 0);
++
++ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ if (new_size == old_size)
++ return YAFFS_OK;
++
++ if (new_size > old_size) {
++ yaffs2_handle_hole(in, new_size);
++ in->variant.file_variant.file_size = new_size;
++ } else {
++ /* new_size < old_size */
++ yaffs_resize_file_down(in, new_size);
++ }
++
++	/* Write a new object header to reflect the resize
++	 * and show that we've shrunk the file, if need be.
++	 * Do this only if the file is not in the deleted directories
++	 * and is not shadowed.
++	 */
++ if (in->parent &&
++ !in->is_shadowed &&
++ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
++ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
++
++ return YAFFS_OK;
++}
++
++int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
++{
++ if (!in->dirty)
++ return YAFFS_OK;
++
++ yaffs_flush_file_cache(in);
++
++ if (data_sync)
++ return YAFFS_OK;
++
++ if (update_time)
++ yaffs_load_current_time(in, 0, 0);
++
++ return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
++ YAFFS_OK : YAFFS_FAIL;
++}
++
++
++/* yaffs_del_file deletes the whole file data
++ * and the inode associated with the file.
++ * It does not delete the links associated with the file.
++ */
++static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
++{
++ int ret_val;
++ int del_now = 0;
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (!in->my_inode)
++ del_now = 1;
++
++ if (del_now) {
++ ret_val =
++ yaffs_change_obj_name(in, in->my_dev->del_dir,
++ _Y("deleted"), 0, 0);
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: immediate deletion of file %d",
++ in->obj_id);
++ in->deleted = 1;
++ in->my_dev->n_deleted_files++;
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
++ yaffs_soft_del_file(in);
++ } else {
++ ret_val =
++ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
++ }
++ return ret_val;
++}
++
++static int yaffs_del_file(struct yaffs_obj *in)
++{
++ int ret_val = YAFFS_OK;
++ int deleted; /* Need to cache value on stack if in is freed */
++ struct yaffs_dev *dev = in->my_dev;
++
++ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
++ yaffs_resize_file(in, 0);
++
++ if (in->n_data_chunks > 0) {
++ /* Use soft deletion if there is data in the file.
++ * That won't be the case if it has been resized to zero.
++ */
++ if (!in->unlinked)
++ ret_val = yaffs_unlink_file_if_needed(in);
++
++ deleted = in->deleted;
++
++ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
++ in->deleted = 1;
++ deleted = 1;
++ in->my_dev->n_deleted_files++;
++ yaffs_soft_del_file(in);
++ }
++ return deleted ? YAFFS_OK : YAFFS_FAIL;
++ } else {
++ /* The file has no data chunks so we toss it immediately */
++ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
++ in->variant.file_variant.top = NULL;
++ yaffs_generic_obj_del(in);
++
++ return YAFFS_OK;
++ }
++}
++
++int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
++{
++ return (obj &&
++ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
++ !(list_empty(&obj->variant.dir_variant.children));
++}
++
++static int yaffs_del_dir(struct yaffs_obj *obj)
++{
++ /* First check that the directory is empty. */
++ if (yaffs_is_non_empty_dir(obj))
++ return YAFFS_FAIL;
++
++ return yaffs_generic_obj_del(obj);
++}
++
++static int yaffs_del_symlink(struct yaffs_obj *in)
++{
++ kfree(in->variant.symlink_variant.alias);
++ in->variant.symlink_variant.alias = NULL;
++
++ return yaffs_generic_obj_del(in);
++}
++
++static int yaffs_del_link(struct yaffs_obj *in)
++{
++ /* remove this hardlink from the list associated with the equivalent
++ * object
++ */
++ list_del_init(&in->hard_links);
++ return yaffs_generic_obj_del(in);
++}
++
++int yaffs_del_obj(struct yaffs_obj *obj)
++{
++ int ret_val = -1;
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ ret_val = yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!list_empty(&obj->variant.dir_variant.dirty)) {
++ yaffs_trace(YAFFS_TRACE_BACKGROUND,
++ "Remove object %d from dirty directories",
++ obj->obj_id);
++ list_del_init(&obj->variant.dir_variant.dirty);
++ }
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ ret_val = yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ ret_val = yaffs_del_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ ret_val = yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ ret_val = 0;
++ break; /* should not happen. */
++ }
++ return ret_val;
++}
++
++
++static void yaffs_empty_dir_to_dir(struct yaffs_obj *from_dir,
++ struct yaffs_obj *to_dir)
++{
++ struct yaffs_obj *obj;
++ struct list_head *lh;
++ struct list_head *n;
++
++ list_for_each_safe(lh, n, &from_dir->variant.dir_variant.children) {
++ obj = list_entry(lh, struct yaffs_obj, siblings);
++ yaffs_add_obj_to_dir(to_dir, obj);
++ }
++}
++
++struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
++ enum yaffs_obj_type type)
++{
++ /* Tear down the old variant */
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ /* Nuke file data */
++ yaffs_resize_file(obj, 0);
++ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
++ obj->variant.file_variant.top = NULL;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Put the children in lost and found. */
++ yaffs_empty_dir_to_dir(obj, obj->my_dev->lost_n_found);
++ if (!list_empty(&obj->variant.dir_variant.dirty))
++ list_del_init(&obj->variant.dir_variant.dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++		/* Nuke symlink data */
++ kfree(obj->variant.symlink_variant.alias);
++ obj->variant.symlink_variant.alias = NULL;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ list_del_init(&obj->hard_links);
++ break;
++ default:
++ break;
++ }
++
++ memset(&obj->variant, 0, sizeof(obj->variant));
++
++	/* Set up new variant if the memset is not enough. */
++ switch (type) {
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ INIT_LIST_HEAD(&obj->variant.dir_variant.children);
++ INIT_LIST_HEAD(&obj->variant.dir_variant.dirty);
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ default:
++ break;
++ }
++
++ obj->variant_type = type;
++
++ return obj;
++
++}
++
++static int yaffs_unlink_worker(struct yaffs_obj *obj)
++{
++ int del_now = 0;
++
++ if (!obj)
++ return YAFFS_FAIL;
++
++ if (!obj->my_inode)
++ del_now = 1;
++
++ yaffs_update_parent(obj->parent);
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ return yaffs_del_link(obj);
++ } else if (!list_empty(&obj->hard_links)) {
++ /* Curve ball: We're unlinking an object that has a hardlink.
++ *
++ * This problem arises because we are not strictly following
++		 * the Linux link/inode model.
++ *
++ * We can't really delete the object.
++ * Instead, we do the following:
++ * - Select a hardlink.
++ * - Unhook it from the hard links
++ * - Move it from its parent directory so that the rename works.
++ * - Rename the object to the hardlink's name.
++ * - Delete the hardlink
++ */
++
++ struct yaffs_obj *hl;
++ struct yaffs_obj *parent;
++ int ret_val;
++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
++ hard_links);
++
++ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
++ parent = hl->parent;
++
++ list_del_init(&hl->hard_links);
++
++ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
++
++ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
++
++ if (ret_val == YAFFS_OK)
++ ret_val = yaffs_generic_obj_del(hl);
++
++ return ret_val;
++
++ } else if (del_now) {
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return yaffs_del_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ list_del_init(&obj->variant.dir_variant.dirty);
++ return yaffs_del_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ return yaffs_del_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ return yaffs_generic_obj_del(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ return YAFFS_FAIL;
++ }
++ } else if (yaffs_is_non_empty_dir(obj)) {
++ return YAFFS_FAIL;
++ } else {
++ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
++ _Y("unlinked"), 0, 0);
++ }
++}
++
++static int yaffs_unlink_obj(struct yaffs_obj *obj)
++{
++ if (obj && obj->unlink_allowed)
++ return yaffs_unlink_worker(obj);
++
++ return YAFFS_FAIL;
++}
++
++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
++{
++ struct yaffs_obj *obj;
++
++ obj = yaffs_find_by_name(dir, name);
++ return yaffs_unlink_obj(obj);
++}
++
++/* Note:
++ * If old_name is NULL then we take old_dir as the object to be renamed.
++ */
++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
++ struct yaffs_obj *new_dir, const YCHAR *new_name)
++{
++ struct yaffs_obj *obj = NULL;
++ struct yaffs_obj *existing_target = NULL;
++ int force = 0;
++ int result;
++ struct yaffs_dev *dev;
++
++ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ dev = old_dir->my_dev;
++
++#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
++	/* Special case for case insensitive systems.
++ * While look-up is case insensitive, the name isn't.
++ * Therefore we might want to change x.txt to X.txt
++ */
++ if (old_dir == new_dir &&
++ old_name && new_name &&
++ strcmp(old_name, new_name) == 0)
++ force = 1;
++#endif
++
++ if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
++ YAFFS_MAX_NAME_LENGTH)
++ /* ENAMETOOLONG */
++ return YAFFS_FAIL;
++
++ if (old_name)
++ obj = yaffs_find_by_name(old_dir, old_name);
++	else {
++ obj = old_dir;
++ old_dir = obj->parent;
++ }
++
++ if (obj && obj->rename_allowed) {
++ /* Now handle an existing target, if there is one */
++ existing_target = yaffs_find_by_name(new_dir, new_name);
++ if (yaffs_is_non_empty_dir(existing_target)) {
++ return YAFFS_FAIL; /* ENOTEMPTY */
++ } else if (existing_target && existing_target != obj) {
++ /* Nuke the target first, using shadowing,
++ * but only if it isn't the same object.
++ *
++ * Note we must disable gc here otherwise it can mess
++ * up the shadowing.
++ *
++ */
++ dev->gc_disable = 1;
++ yaffs_change_obj_name(obj, new_dir, new_name, force,
++ existing_target->obj_id);
++ existing_target->is_shadowed = 1;
++ yaffs_unlink_obj(existing_target);
++ dev->gc_disable = 0;
++ }
++
++ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
++
++ yaffs_update_parent(old_dir);
++ if (new_dir != old_dir)
++ yaffs_update_parent(new_dir);
++
++ return result;
++ }
++ return YAFFS_FAIL;
++}
++
++/*----------------------- Initialisation Scanning ---------------------- */
++
++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
++ int backward_scanning)
++{
++ struct yaffs_obj *obj;
++
++ if (backward_scanning) {
++ /* Handle YAFFS2 case (backward scanning)
++ * If the shadowed object exists then ignore.
++ */
++ obj = yaffs_find_by_number(dev, obj_id);
++ if (obj)
++ return;
++ }
++
++ /* Let's create it (if it does not exist) assuming it is a file so that
++ * it can do shrinking etc.
++ * We put it in unlinked dir to be cleaned up after the scanning
++ */
++ obj =
++ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
++ if (!obj)
++ return;
++ obj->is_shadowed = 1;
++ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
++ obj->variant.file_variant.shrink_size = 0;
++ obj->valid = 1; /* So that we don't read any other info. */
++}
++
++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
++{
++ struct list_head *lh;
++ struct list_head *save;
++ struct yaffs_obj *hl;
++ struct yaffs_obj *in;
++
++ list_for_each_safe(lh, save, hard_list) {
++ hl = list_entry(lh, struct yaffs_obj, hard_links);
++ in = yaffs_find_by_number(dev,
++ hl->variant.hardlink_variant.equiv_id);
++
++ if (in) {
++ /* Add the hardlink pointers */
++ hl->variant.hardlink_variant.equiv_obj = in;
++ list_add(&hl->hard_links, &in->hard_links);
++ } else {
++			/* Todo: Need to report/handle this better.
++			 * Got a problem... hardlink to a non-existent object
++			 */
++ hl->variant.hardlink_variant.equiv_obj = NULL;
++ INIT_LIST_HEAD(&hl->hard_links);
++ }
++ }
++}
++
++static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
++{
++ /*
++ * Sort out state of unlinked and deleted objects after scanning.
++ */
++ struct list_head *i;
++ struct list_head *n;
++ struct yaffs_obj *l;
++
++ if (dev->read_only)
++ return;
++
++ /* Soft delete all the unlinked files */
++ list_for_each_safe(i, n,
++ &dev->unlinked_dir->variant.dir_variant.children) {
++ l = list_entry(i, struct yaffs_obj, siblings);
++ yaffs_del_obj(l);
++ }
++
++ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
++ l = list_entry(i, struct yaffs_obj, siblings);
++ yaffs_del_obj(l);
++ }
++}
++
++/*
++ * This code iterates through all the objects making sure that they are rooted.
++ * Any unrooted objects are re-rooted in lost+found.
++ * An object needs to be in one of:
++ * - Directly under deleted, unlinked
++ * - Directly or indirectly under root.
++ *
++ * Note:
++ * This code assumes that we don't ever change the current relationships
++ * between directories:
++ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
++ * lost-n-found->parent == root_dir
++ *
++ * This fixes the problem where directories might have inadvertently been
++ * deleted leaving the object "hanging" without being rooted in the
++ * directory tree.
++ */
++
++static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
++{
++ return (obj == dev->del_dir ||
++ obj == dev->unlinked_dir || obj == dev->root_dir);
++}
++
++static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_obj *parent;
++ int i;
++ struct list_head *lh;
++ struct list_head *n;
++ int depth_limit;
++ int hanging;
++
++ if (dev->read_only)
++ return;
++
++ /* Iterate through the objects in each hash entry,
++ * looking at each object.
++ * Make sure it is rooted.
++ */
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
++ obj = list_entry(lh, struct yaffs_obj, hash_link);
++ parent = obj->parent;
++
++ if (yaffs_has_null_parent(dev, obj)) {
++ /* These directories are not hanging */
++ hanging = 0;
++ } else if (!parent ||
++ parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ hanging = 1;
++ } else if (yaffs_has_null_parent(dev, parent)) {
++ hanging = 0;
++ } else {
++ /*
++ * Need to follow the parent chain to
++ * see if it is hanging.
++ */
++ hanging = 0;
++ depth_limit = 100;
++
++ while (parent != dev->root_dir &&
++ parent->parent &&
++ parent->parent->variant_type ==
++ YAFFS_OBJECT_TYPE_DIRECTORY &&
++ depth_limit > 0) {
++ parent = parent->parent;
++ depth_limit--;
++ }
++ if (parent != dev->root_dir)
++ hanging = 1;
++ }
++ if (hanging) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Hanging object %d moved to lost and found",
++ obj->obj_id);
++ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
++ }
++ }
++ }
++}
++
++/*
++ * Delete directory contents for cleaning up lost and found.
++ */
++static void yaffs_del_dir_contents(struct yaffs_obj *dir)
++{
++ struct yaffs_obj *obj;
++ struct list_head *lh;
++ struct list_head *n;
++
++ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
++ BUG();
++
++ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
++ obj = list_entry(lh, struct yaffs_obj, siblings);
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
++ yaffs_del_dir_contents(obj);
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Deleting lost_found object %d",
++ obj->obj_id);
++ yaffs_unlink_obj(obj);
++ }
++}
++
++static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
++{
++ yaffs_del_dir_contents(dev->lost_n_found);
++}
++
++
++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
++ const YCHAR *name)
++{
++ int sum;
++ struct list_head *i;
++ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
++ struct yaffs_obj *l;
++
++ if (!name)
++ return NULL;
++
++ if (!directory) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: yaffs_find_by_name: null pointer directory"
++ );
++ BUG();
++ return NULL;
++ }
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "tragedy: yaffs_find_by_name: non-directory"
++ );
++ BUG();
++ }
++
++ sum = yaffs_calc_name_sum(name);
++
++ list_for_each(i, &directory->variant.dir_variant.children) {
++ l = list_entry(i, struct yaffs_obj, siblings);
++
++ if (l->parent != directory)
++ BUG();
++
++ yaffs_check_obj_details_loaded(l);
++
++ /* Special case for lost-n-found */
++ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++ if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
++ return l;
++ } else if (l->sum == sum || l->hdr_chunk <= 0) {
++ /* LostnFound chunk called Objxxx
++ * Do a real check
++ */
++ yaffs_get_obj_name(l, buffer,
++ YAFFS_MAX_NAME_LENGTH + 1);
++ if (!strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
++ return l;
++ }
++ }
++ return NULL;
++}
++
++/* GetEquivalentObject dereferences any hard links to get to the
++ * actual object.
++ */
++
++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
++{
++ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
++ obj = obj->variant.hardlink_variant.equiv_obj;
++ yaffs_check_obj_details_loaded(obj);
++ }
++ return obj;
++}
++
++/*
++ * A note or two on object names.
++ * * If the object name is missing, we then make one up in the form objnnn
++ *
++ * * ASCII names are stored in the object header's name field from byte zero
++ * * Unicode names are historically stored starting from byte zero.
++ *
++ * Then there are automatic Unicode names...
++ * The purpose of these is to save names in a way that can be read as
++ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
++ * system to share files.
++ *
++ * These automatic unicode names are stored slightly differently...
++ * - If the name can fit in the ASCII character space then they are saved as
++ *   ascii names as per above.
++ * - If the name needs Unicode then the name is saved in Unicode
++ *   starting at oh->name[1].
++ *
++ */
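++/* For example, with auto_unicode enabled the name "abc" fits in ASCII and is
++ * stored as plain bytes from oh->name[0], whereas a name containing any
++ * YCHAR above 0xff is stored with oh->name[0] == 0 and the unicode string
++ * starting at oh->name[1] (see yaffs_load_oh_from_name()).
++ */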
++static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
++ int buffer_size)
++{
++ /* Create an object name if we could not find one. */
++ if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
++ YCHAR local_name[20];
++ YCHAR num_string[20];
++ YCHAR *x = &num_string[19];
++ unsigned v = obj->obj_id;
++ num_string[19] = 0;
++ while (v > 0) {
++ x--;
++ *x = '0' + (v % 10);
++ v /= 10;
++ }
++ /* make up a name */
++ strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
++ strcat(local_name, x);
++ strncpy(name, local_name, buffer_size - 1);
++ }
++}
++
++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
++{
++ memset(name, 0, buffer_size * sizeof(YCHAR));
++ yaffs_check_obj_details_loaded(obj);
++ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
++ strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
++ } else if (obj->short_name[0]) {
++ strcpy(name, obj->short_name);
++ } else if (obj->hdr_chunk > 0) {
++ int result;
++ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
++
++ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
++
++ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
++
++ if (obj->hdr_chunk > 0) {
++ result = yaffs_rd_chunk_tags_nand(obj->my_dev,
++ obj->hdr_chunk,
++ buffer, NULL);
++ }
++ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
++ buffer_size);
++
++ yaffs_release_temp_buffer(obj->my_dev, buffer);
++ }
++
++ yaffs_fix_null_name(obj, name, buffer_size);
++
++ return strnlen(name, YAFFS_MAX_NAME_LENGTH);
++}
++
++loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
++{
++ /* Dereference any hard linking */
++ obj = yaffs_get_equivalent_obj(obj);
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ return obj->variant.file_variant.file_size;
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
++ if (!obj->variant.symlink_variant.alias)
++ return 0;
++ return strnlen(obj->variant.symlink_variant.alias,
++ YAFFS_MAX_ALIAS_LENGTH);
++ } else {
++ /* Only a directory should drop through to here */
++ return obj->my_dev->data_bytes_per_chunk;
++ }
++}
++
++int yaffs_get_obj_link_count(struct yaffs_obj *obj)
++{
++ int count = 0;
++ struct list_head *i;
++
++ if (!obj->unlinked)
++ count++; /* the object itself */
++
++ list_for_each(i, &obj->hard_links)
++ count++; /* add the hard links; */
++
++ return count;
++}
++
++int yaffs_get_obj_inode(struct yaffs_obj *obj)
++{
++ obj = yaffs_get_equivalent_obj(obj);
++
++ return obj->obj_id;
++}
++
++unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
++{
++ obj = yaffs_get_equivalent_obj(obj);
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ return DT_REG;
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ return DT_DIR;
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ return DT_LNK;
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ return DT_REG;
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ if (S_ISFIFO(obj->yst_mode))
++ return DT_FIFO;
++ if (S_ISCHR(obj->yst_mode))
++ return DT_CHR;
++ if (S_ISBLK(obj->yst_mode))
++ return DT_BLK;
++ if (S_ISSOCK(obj->yst_mode))
++ return DT_SOCK;
++ return DT_REG;
++ break;
++ default:
++ return DT_REG;
++ break;
++ }
++}
++
++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
++{
++ obj = yaffs_get_equivalent_obj(obj);
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
++ return yaffs_clone_str(obj->variant.symlink_variant.alias);
++ else
++ return yaffs_clone_str(_Y(""));
++}
++
++/*--------------------------- Initialisation code -------------------------- */
++
++static int yaffs_check_dev_fns(struct yaffs_dev *dev)
++{
++ struct yaffs_driver *drv = &dev->drv;
++ struct yaffs_tags_handler *tagger = &dev->tagger;
++
++ /* Common functions, gotta have */
++ if (!drv->drv_read_chunk_fn ||
++ !drv->drv_write_chunk_fn ||
++ !drv->drv_erase_fn)
++ return 0;
++
++ if (dev->param.is_yaffs2 &&
++ (!drv->drv_mark_bad_fn || !drv->drv_check_bad_fn))
++ return 0;
++
++ /* Install the default tags marshalling functions if needed. */
++ yaffs_tags_compat_install(dev);
++ yaffs_tags_marshall_install(dev);
++
++ /* Check we now have the marshalling functions required. */
++ if (!tagger->write_chunk_tags_fn ||
++ !tagger->read_chunk_tags_fn ||
++ !tagger->query_block_fn ||
++ !tagger->mark_bad_fn)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs_create_initial_dir(struct yaffs_dev *dev)
++{
++ /* Initialise the unlinked, deleted, root and lost+found directories */
++ dev->lost_n_found = dev->root_dir = NULL;
++ dev->unlinked_dir = dev->del_dir = NULL;
++ dev->unlinked_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
++ dev->del_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
++ dev->root_dir =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
++ YAFFS_ROOT_MODE | S_IFDIR);
++ dev->lost_n_found =
++ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
++ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
++
++ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
++ && dev->del_dir) {
++ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
++ return YAFFS_OK;
++ }
++ return YAFFS_FAIL;
++}
++
++/* Low level init.
++ * Typically only used by yaffs_guts_initialise, but also used by the
++ * low level yaffs driver tests.
++ */
++
++int yaffs_guts_ll_init(struct yaffs_dev *dev)
++{
++
++
++ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_ll_init()");
++
++ if (!dev) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Need a device"
++ );
++ return YAFFS_FAIL;
++ }
++
++ if (dev->ll_init)
++ return YAFFS_OK;
++
++ dev->internal_start_block = dev->param.start_block;
++ dev->internal_end_block = dev->param.end_block;
++ dev->block_offset = 0;
++ dev->chunk_offset = 0;
++ dev->n_free_chunks = 0;
++
++ dev->gc_block = 0;
++
++ if (dev->param.start_block == 0) {
++ dev->internal_start_block = dev->param.start_block + 1;
++ dev->internal_end_block = dev->param.end_block + 1;
++ dev->block_offset = 1;
++ dev->chunk_offset = dev->param.chunks_per_block;
++ }
++
++ /* Check geometry parameters. */
++
++ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
++ dev->param.total_bytes_per_chunk < 1024) ||
++ (!dev->param.is_yaffs2 &&
++ dev->param.total_bytes_per_chunk < 512) ||
++ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
++ dev->param.chunks_per_block < 2 ||
++ dev->param.n_reserved_blocks < 2 ||
++ dev->internal_start_block <= 0 ||
++ dev->internal_end_block <= 0 ||
++ dev->internal_end_block <=
++ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
++ ) {
++ /* otherwise it is too small */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
++ dev->param.total_bytes_per_chunk,
++ dev->param.is_yaffs2 ? "2" : "",
++ dev->param.inband_tags);
++ return YAFFS_FAIL;
++ }
++
++ /* Sort out space for inband tags, if required */
++ if (dev->param.inband_tags)
++ dev->data_bytes_per_chunk =
++ dev->param.total_bytes_per_chunk -
++ sizeof(struct yaffs_packed_tags2_tags_only);
++ else
++ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
++
++ /* Got the right mix of functions? */
++ if (!yaffs_check_dev_fns(dev)) {
++ /* Function missing */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "device function(s) missing or wrong");
++
++ return YAFFS_FAIL;
++ }
++
++ if (yaffs_init_nand(dev) != YAFFS_OK) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
++ return YAFFS_FAIL;
++ }
++
++ return YAFFS_OK;
++}
++
++
++int yaffs_guts_format_dev(struct yaffs_dev *dev)
++{
++ int i;
++ enum yaffs_block_state state;
++ u32 dummy;
++
++ if(yaffs_guts_ll_init(dev) != YAFFS_OK)
++ return YAFFS_FAIL;
++
++ if(dev->is_mounted)
++ return YAFFS_FAIL;
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ yaffs_query_init_block_state(dev, i, &state, &dummy);
++ if (state != YAFFS_BLOCK_STATE_DEAD)
++ yaffs_erase_block(dev, i);
++ }
++
++ return YAFFS_OK;
++}
++
++
++int yaffs_guts_initialise(struct yaffs_dev *dev)
++{
++ int init_failed = 0;
++ unsigned x;
++ int bits;
++
++ if(yaffs_guts_ll_init(dev) != YAFFS_OK)
++ return YAFFS_FAIL;
++
++ if (dev->is_mounted) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
++ return YAFFS_FAIL;
++ }
++
++ dev->is_mounted = 1;
++
++ /* OK now calculate a few things for the device */
++
++ /*
++ * Calculate all the chunk size manipulation numbers:
++ */
++ x = dev->data_bytes_per_chunk;
++ /* We always use dev->chunk_shift and dev->chunk_div */
++ dev->chunk_shift = calc_shifts(x);
++ x >>= dev->chunk_shift;
++ dev->chunk_div = x;
++ /* We only use chunk mask if chunk_div is 1 */
++ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
++
++ /*
++ * Calculate chunk_grp_bits.
++	 * We need to find the next power of 2 greater than internal_end_block
++ */
++
++ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
++
++ bits = calc_shifts_ceiling(x);
++
++ /* Set up tnode width if wide tnodes are enabled. */
++ if (!dev->param.wide_tnodes_disabled) {
++ /* bits must be even so that we end up with 32-bit words */
++ if (bits & 1)
++ bits++;
++ if (bits < 16)
++ dev->tnode_width = 16;
++ else
++ dev->tnode_width = bits;
++ } else {
++ dev->tnode_width = 16;
++ }
++
++ dev->tnode_mask = (1 << dev->tnode_width) - 1;
++
++ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
++ * so if the bitwidth of the
++ * chunk range we're using is greater than 16 we need
++ * to figure out chunk shift and chunk_grp_size
++ */
++
++ if (bits <= dev->tnode_width)
++ dev->chunk_grp_bits = 0;
++ else
++ dev->chunk_grp_bits = bits - dev->tnode_width;
++
++ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
++ if (dev->tnode_size < sizeof(struct yaffs_tnode))
++ dev->tnode_size = sizeof(struct yaffs_tnode);
++
++ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
++
++ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
++ /* We have a problem because the soft delete won't work if
++ * the chunk group size > chunks per block.
++ * This can be remedied by using larger "virtual blocks".
++ */
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
++
++ return YAFFS_FAIL;
++ }
++
++ /* Finished verifying the device, continue with initialisation */
++
++ /* More device initialisation */
++ dev->all_gcs = 0;
++ dev->passive_gc_count = 0;
++ dev->oldest_dirty_gc_count = 0;
++ dev->bg_gcs = 0;
++ dev->gc_block_finder = 0;
++ dev->buffered_block = -1;
++ dev->doing_buffered_block_rewrite = 0;
++ dev->n_deleted_files = 0;
++ dev->n_bg_deletions = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_ecc_fixed = 0;
++ dev->n_ecc_unfixed = 0;
++ dev->n_tags_ecc_fixed = 0;
++ dev->n_tags_ecc_unfixed = 0;
++ dev->n_erase_failures = 0;
++ dev->n_erased_blocks = 0;
++ dev->gc_disable = 0;
++ dev->has_pending_prioritised_gc = 1;
++ /* Assume the worst for now, will get fixed on first GC */
++ INIT_LIST_HEAD(&dev->dirty_dirs);
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
++
++ /* Initialise temporary buffers and caches. */
++ if (!yaffs_init_tmp_buffers(dev))
++ init_failed = 1;
++
++ dev->cache = NULL;
++ dev->gc_cleanup_list = NULL;
++
++ if (!init_failed && dev->param.n_caches > 0) {
++ int i;
++ void *buf;
++ int cache_bytes =
++ dev->param.n_caches * sizeof(struct yaffs_cache);
++
++ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
++ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
++
++ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
++
++ buf = (u8 *) dev->cache;
++
++ if (dev->cache)
++ memset(dev->cache, 0, cache_bytes);
++
++ for (i = 0; i < dev->param.n_caches && buf; i++) {
++ dev->cache[i].object = NULL;
++ dev->cache[i].last_use = 0;
++ dev->cache[i].dirty = 0;
++ dev->cache[i].data = buf =
++ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
++ }
++ if (!buf)
++ init_failed = 1;
++
++ dev->cache_last_use = 0;
++ }
++
++ dev->cache_hits = 0;
++
++ if (!init_failed) {
++ dev->gc_cleanup_list =
++ kmalloc(dev->param.chunks_per_block * sizeof(u32),
++ GFP_NOFS);
++ if (!dev->gc_cleanup_list)
++ init_failed = 1;
++ }
++
++ if (dev->param.is_yaffs2)
++ dev->param.use_header_file_size = 1;
++
++ if (!init_failed && !yaffs_init_blocks(dev))
++ init_failed = 1;
++
++ yaffs_init_tnodes_and_objs(dev);
++
++ if (!init_failed && !yaffs_create_initial_dir(dev))
++ init_failed = 1;
++
++ if (!init_failed && dev->param.is_yaffs2 &&
++ !dev->param.disable_summary &&
++ !yaffs_summary_init(dev))
++ init_failed = 1;
++
++ if (!init_failed) {
++ /* Now scan the flash. */
++ if (dev->param.is_yaffs2) {
++ if (yaffs2_checkpt_restore(dev)) {
++ yaffs_check_obj_details_loaded(dev->root_dir);
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT |
++ YAFFS_TRACE_MOUNT,
++ "yaffs: restored from checkpoint"
++ );
++ } else {
++
++ /* Clean up the mess caused by an aborted
++ * checkpoint load then scan backwards.
++ */
++ yaffs_deinit_blocks(dev);
++
++ yaffs_deinit_tnodes_and_objs(dev);
++
++ dev->n_erased_blocks = 0;
++ dev->n_free_chunks = 0;
++ dev->alloc_block = -1;
++ dev->alloc_page = -1;
++ dev->n_deleted_files = 0;
++ dev->n_unlinked_files = 0;
++ dev->n_bg_deletions = 0;
++
++ if (!init_failed && !yaffs_init_blocks(dev))
++ init_failed = 1;
++
++ yaffs_init_tnodes_and_objs(dev);
++
++ if (!init_failed
++ && !yaffs_create_initial_dir(dev))
++ init_failed = 1;
++
++ if (!init_failed && !yaffs2_scan_backwards(dev))
++ init_failed = 1;
++ }
++ } else if (!yaffs1_scan(dev)) {
++ init_failed = 1;
++ }
++
++ yaffs_strip_deleted_objs(dev);
++ yaffs_fix_hanging_objs(dev);
++ if (dev->param.empty_lost_n_found)
++ yaffs_empty_l_n_f(dev);
++ }
++
++ if (init_failed) {
++ /* Clean up the mess */
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: yaffs_guts_initialise() aborted.");
++
++ yaffs_deinitialise(dev);
++ return YAFFS_FAIL;
++ }
++
++ /* Zero out stats */
++ dev->n_page_reads = 0;
++ dev->n_page_writes = 0;
++ dev->n_erasures = 0;
++ dev->n_gc_copies = 0;
++ dev->n_retried_writes = 0;
++
++ dev->n_retired_blocks = 0;
++
++ yaffs_verify_free_chunks(dev);
++ yaffs_verify_blocks(dev);
++
++ /* Clean up any aborted checkpoint data */
++ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
++ yaffs2_checkpt_invalidate(dev);
++
++ yaffs_trace(YAFFS_TRACE_TRACING,
++ "yaffs: yaffs_guts_initialise() done.");
++ return YAFFS_OK;
++}
++
++void yaffs_deinitialise(struct yaffs_dev *dev)
++{
++ if (dev->is_mounted) {
++ int i;
++
++ yaffs_deinit_blocks(dev);
++ yaffs_deinit_tnodes_and_objs(dev);
++ yaffs_summary_deinit(dev);
++
++ if (dev->param.n_caches > 0 && dev->cache) {
++
++ for (i = 0; i < dev->param.n_caches; i++) {
++ kfree(dev->cache[i].data);
++ dev->cache[i].data = NULL;
++ }
++
++ kfree(dev->cache);
++ dev->cache = NULL;
++ }
++
++ kfree(dev->gc_cleanup_list);
++
++ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
++ kfree(dev->temp_buffer[i].buffer);
++
++ dev->is_mounted = 0;
++
++ yaffs_deinit_nand(dev);
++ }
++}
++
++int yaffs_count_free_chunks(struct yaffs_dev *dev)
++{
++ int n_free = 0;
++ int b;
++ struct yaffs_block_info *blk;
++
++ blk = dev->block_info;
++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++ switch (blk->block_state) {
++ case YAFFS_BLOCK_STATE_EMPTY:
++ case YAFFS_BLOCK_STATE_ALLOCATING:
++ case YAFFS_BLOCK_STATE_COLLECTING:
++ case YAFFS_BLOCK_STATE_FULL:
++ n_free +=
++ (dev->param.chunks_per_block - blk->pages_in_use +
++ blk->soft_del_pages);
++ break;
++ default:
++ break;
++ }
++ blk++;
++ }
++ return n_free;
++}
++
++int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
++{
++ /* This is what we report to the outside world */
++ int n_free;
++ int n_dirty_caches;
++ int blocks_for_checkpt;
++ int i;
++
++ n_free = dev->n_free_chunks;
++ n_free += dev->n_deleted_files;
++
++ /* Now count and subtract the number of dirty chunks in the cache. */
++
++ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
++ if (dev->cache[i].dirty)
++ n_dirty_caches++;
++ }
++
++ n_free -= n_dirty_caches;
++
++ n_free -=
++ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
++
++ /* Now figure checkpoint space and report that... */
++ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
++
++ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
++
++ if (n_free < 0)
++ n_free = 0;
++
++ return n_free;
++}
++
++
++
++/*
++ * Marshalling functions to get loff_t file sizes into and out of
++ * object headers.
++ */
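++/* For example, a file size of 0x123456789 is stored as
++ * file_size_low = 0x23456789 and file_size_high = 0x00000001.
++ * yaffs_oh_to_size() reassembles the two words, falling back to just the
++ * 32-bit low word when loff_t is not 64-bit or when file_size_high is
++ * 0xffffffff (so ~high == 0).
++ */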
++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
++{
++ oh->file_size_low = (fsize & 0xFFFFFFFF);
++ oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
++}
++
++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
++{
++ loff_t retval;
++
++ if (sizeof(loff_t) >= 8 && ~(oh->file_size_high))
++ retval = (((loff_t) oh->file_size_high) << 32) |
++ (((loff_t) oh->file_size_low) & 0xFFFFFFFF);
++ else
++ retval = (loff_t) oh->file_size_low;
++
++ return retval;
++}
++
++
++void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
++{
++ int i;
++ struct yaffs_block_info *bi;
++ int s;
++
++ for(i = 0; i < 10; i++)
++ bs[i] = 0;
++
++ for(i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ bi = yaffs_get_block_info(dev, i);
++ s = bi->block_state;
++ if(s > YAFFS_BLOCK_STATE_DEAD || s < YAFFS_BLOCK_STATE_UNKNOWN)
++ bs[0]++;
++ else
++ bs[s]++;
++ }
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_guts.h linux-3.14.4/fs/yaffs2/yaffs_guts.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_guts.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_guts.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,1007 @@
++/*
++ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_GUTS_H__
++#define __YAFFS_GUTS_H__
++
++#include "yportenv.h"
++
++#define YAFFS_OK 1
++#define YAFFS_FAIL 0
++
++/* Give us a Y=0x59,
++ * Give us an A=0x41,
++ * Give us an FF=0xff
++ * Give us an S=0x53
++ * And what have we got...
++ */
++#define YAFFS_MAGIC 0x5941ff53
++
++/*
++ * Tnodes form a tree with the tnodes in "levels"
++ * Levels greater than 0 hold 8 slots which point to other tnodes.
++ * Those at level 0 hold 16 slots which point to chunks in NAND.
++ *
++ * A maximum level of 8 thus supports files of up to
++ *
++ * 2^(3*MAX_LEVEL+4) chunks,
++ *
++ * i.e. up to 2^28 chunks, which gives a maximum file size of around
++ * 512 Gbytes with 2k chunks.
++ */
++#define YAFFS_NTNODES_LEVEL0 16
++#define YAFFS_TNODES_LEVEL0_BITS 4
++#define YAFFS_TNODES_LEVEL0_MASK 0xf
++
++#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
++#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
++#define YAFFS_TNODES_INTERNAL_MASK 0x7
++#define YAFFS_TNODES_MAX_LEVEL 8
++#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \
++ YAFFS_TNODES_INTERNAL_BITS * \
++ YAFFS_TNODES_MAX_LEVEL)
++#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1)
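++/* With YAFFS_TNODES_LEVEL0_BITS = 4, YAFFS_TNODES_INTERNAL_BITS = 3 and
++ * YAFFS_TNODES_MAX_LEVEL = 8, YAFFS_TNODES_MAX_BITS is 4 + 3*8 = 28, so up
++ * to 2^28 chunks can be addressed; at 2048 bytes per chunk that is 2^39
++ * bytes, i.e. the ~512 Gbyte limit mentioned above.
++ */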
++
++#define YAFFS_MAX_FILE_SIZE_32 0x7fffffff
++
++/* Constants for YAFFS1 mode */
++#define YAFFS_BYTES_PER_SPARE 16
++#define YAFFS_BYTES_PER_CHUNK 512
++#define YAFFS_CHUNK_SIZE_SHIFT 9
++#define YAFFS_CHUNKS_PER_BLOCK 32
++#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
++
++#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
++#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
++
++
++
++#define YAFFS_ALLOCATION_NOBJECTS 100
++#define YAFFS_ALLOCATION_NTNODES 100
++#define YAFFS_ALLOCATION_NLINKS 100
++
++#define YAFFS_NOBJECT_BUCKETS 256
++
++#define YAFFS_OBJECT_SPACE 0x40000
++#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
++
++/* Binary data version stamps */
++#define YAFFS_SUMMARY_VERSION 1
++#define YAFFS_CHECKPOINT_VERSION 7
++
++#ifdef CONFIG_YAFFS_UNICODE
++#define YAFFS_MAX_NAME_LENGTH 127
++#define YAFFS_MAX_ALIAS_LENGTH 79
++#else
++#define YAFFS_MAX_NAME_LENGTH 255
++#define YAFFS_MAX_ALIAS_LENGTH 159
++#endif
++
++#define YAFFS_SHORT_NAME_LENGTH 15
++
++/* Some special object ids for pseudo objects */
++#define YAFFS_OBJECTID_ROOT 1
++#define YAFFS_OBJECTID_LOSTNFOUND 2
++#define YAFFS_OBJECTID_UNLINKED 3
++#define YAFFS_OBJECTID_DELETED 4
++
++/* Fake object Id for summary data */
++#define YAFFS_OBJECTID_SUMMARY 0x10
++
++/* Pseudo object ids for checkpointing */
++#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
++#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
++
++#define YAFFS_MAX_SHORT_OP_CACHES 20
++
++#define YAFFS_N_TEMP_BUFFERS 6
++
++/* We limit the number of attempts at successfully saving a chunk of data.
++ * Small-page devices have 32 pages per block; large-page devices have 64.
++ * Default to something in the order of 5 to 10 blocks worth of chunks.
++ */
++#define YAFFS_WR_ATTEMPTS (5*64)
++
++/* Sequence numbers are used in YAFFS2 to determine block allocation order.
++ * The range is limited slightly to help distinguish bad numbers from good.
++ * This also allows us to perhaps in the future use special numbers for
++ * special purposes.
++ * 0xEFFFFF00 allows the allocation of 8 blocks/second (~1Mbyte/s) for 15
++ * years, which is longer than the expected lifetime of a 2GB device.
++ */
++#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
++#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
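++/* Sanity check on the 15-year figure: 8 blocks/s * 3600 * 24 * 365 * 15 is
++ * roughly 3.8e9 allocations, which stays below 0xefffff00 (~4.0e9).
++ */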
++
++/* Special sequence number for bad block that failed to be marked bad */
++#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
++
++/* ChunkCache is used for short read/write operations.*/
++struct yaffs_cache {
++ struct yaffs_obj *object;
++ int chunk_id;
++ int last_use;
++ int dirty;
++ int n_bytes; /* Only valid if the cache is dirty */
++ int locked; /* Can't push out or flush while locked. */
++ u8 *data;
++};
++
++/* yaffs1 tags structures in RAM
++ * NB This uses bitfields. Bitfields should not straddle a u32 boundary
++ * otherwise the structure size will get blown out.
++ */
++
++struct yaffs_tags {
++ u32 chunk_id:20;
++ u32 serial_number:2;
++ u32 n_bytes_lsb:10;
++ u32 obj_id:18;
++ u32 ecc:12;
++ u32 n_bytes_msb:2;
++};
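++/* The fields above pack into two 32-bit words (20+2+10 and 18+12+2 bits),
++ * so the whole structure is 8 bytes, matching as_bytes[8] in the union below.
++ */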
++
++union yaffs_tags_union {
++ struct yaffs_tags as_tags;
++ u8 as_bytes[8];
++};
++
++
++/* Stuff used for extended tags in YAFFS2 */
++
++enum yaffs_ecc_result {
++ YAFFS_ECC_RESULT_UNKNOWN,
++ YAFFS_ECC_RESULT_NO_ERROR,
++ YAFFS_ECC_RESULT_FIXED,
++ YAFFS_ECC_RESULT_UNFIXED
++};
++
++enum yaffs_obj_type {
++ YAFFS_OBJECT_TYPE_UNKNOWN,
++ YAFFS_OBJECT_TYPE_FILE,
++ YAFFS_OBJECT_TYPE_SYMLINK,
++ YAFFS_OBJECT_TYPE_DIRECTORY,
++ YAFFS_OBJECT_TYPE_HARDLINK,
++ YAFFS_OBJECT_TYPE_SPECIAL
++};
++
++#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
++
++struct yaffs_ext_tags {
++ unsigned chunk_used; /* Status of the chunk: used or unused */
++ unsigned obj_id; /* If 0 this is not used */
++ unsigned chunk_id; /* If 0 this is a header, else a data chunk */
++ unsigned n_bytes; /* Only valid for data chunks */
++
++ /* The following stuff only has meaning when we read */
++ enum yaffs_ecc_result ecc_result;
++ unsigned block_bad;
++
++ /* YAFFS 1 stuff */
++ unsigned is_deleted; /* The chunk is marked deleted */
++ unsigned serial_number; /* Yaffs1 2-bit serial number */
++
++ /* YAFFS2 stuff */
++ unsigned seq_number; /* The sequence number of this block */
++
++ /* Extra info if this is an object header (YAFFS2 only) */
++
++ unsigned extra_available; /* Extra info available if not zero */
++ unsigned extra_parent_id; /* The parent object */
++ unsigned extra_is_shrink; /* Is it a shrink header? */
++ unsigned extra_shadows; /* Does this shadow another object? */
++
++ enum yaffs_obj_type extra_obj_type; /* What object type? */
++
++ loff_t extra_file_size; /* Length if it is a file */
++ unsigned extra_equiv_id; /* Equivalent object for a hard link */
++};
++
++/* Spare structure for YAFFS1 */
++struct yaffs_spare {
++ u8 tb0;
++ u8 tb1;
++ u8 tb2;
++ u8 tb3;
++ u8 page_status; /* set to 0 to delete the chunk */
++ u8 block_status;
++ u8 tb4;
++ u8 tb5;
++ u8 ecc1[3];
++ u8 tb6;
++ u8 tb7;
++ u8 ecc2[3];
++};
++
++/* Special structure for passing through to mtd */
++struct yaffs_nand_spare {
++ struct yaffs_spare spare;
++ int eccres1;
++ int eccres2;
++};
++
++/* Block data in RAM */
++
++enum yaffs_block_state {
++ YAFFS_BLOCK_STATE_UNKNOWN = 0,
++
++ YAFFS_BLOCK_STATE_SCANNING,
++ /* Being scanned */
++
++ YAFFS_BLOCK_STATE_NEEDS_SCAN,
++ /* The block might have something on it (ie it is allocating or full,
++ * perhaps empty) but it needs to be scanned to determine its true
++ * state.
++ * This state is only valid during scanning.
++ * NB We tolerate empty because the pre-scanner might be incapable of
++ * deciding
++ * However, if this state is returned on a YAFFS2 device,
++ * then we expect a sequence number
++ */
++
++ YAFFS_BLOCK_STATE_EMPTY,
++ /* This block is empty */
++
++ YAFFS_BLOCK_STATE_ALLOCATING,
++ /* This block is partially allocated.
++ * At least one page holds valid data.
++ * This is the one currently being used for page
++ * allocation. Should never be more than one of these.
++ * If a block is only partially allocated at mount it is treated as
++ * full.
++ */
++
++ YAFFS_BLOCK_STATE_FULL,
++ /* All the pages in this block have been allocated.
++ * If a block was only partially allocated when mounted we treat
++ * it as fully allocated.
++ */
++
++ YAFFS_BLOCK_STATE_DIRTY,
++ /* The block was full and now all chunks have been deleted.
++ * Erase me, reuse me.
++ */
++
++ YAFFS_BLOCK_STATE_CHECKPOINT,
++ /* This block is assigned to holding checkpoint data. */
++
++ YAFFS_BLOCK_STATE_COLLECTING,
++ /* This block is being garbage collected */
++
++ YAFFS_BLOCK_STATE_DEAD
++ /* This block has failed and is not in use */
++};
++
++#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
++
++struct yaffs_block_info {
++
++ s32 soft_del_pages:10; /* number of soft deleted pages */
++ s32 pages_in_use:10; /* number of pages in use */
++ u32 block_state:4; /* One of the above block states. */
++ /* NB use unsigned because enum is sometimes
++ * an int */
++ u32 needs_retiring:1; /* Data has failed on this block, */
++ /*need to get valid data off and retire*/
++ u32 skip_erased_check:1;/* Skip the erased check on this block */
++ u32 gc_prioritise:1; /* An ECC check or blank check has failed.
++ Block should be prioritised for GC */
++ u32 chunk_error_strikes:3; /* How many times we've had ecc etc
++ failures on this block and tried to reuse it */
++ u32 has_summary:1; /* The block has a summary */
++
++ u32 has_shrink_hdr:1; /* This block has at least one shrink header */
++ u32 seq_number; /* block sequence number for yaffs2 */
++
++};
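++/* The bitfields above total 10+10+4+1+1+1+3+1+1 = 32 bits, so together with
++ * seq_number each block's bookkeeping fits in 8 bytes of RAM.
++ */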
++
++/* -------------------------- Object structure -------------------------------*/
++/* This is the object structure as stored on NAND */
++
++struct yaffs_obj_hdr {
++ enum yaffs_obj_type type;
++
++ /* Apply to everything */
++ int parent_obj_id;
++ u16 sum_no_longer_used; /* checksum of name. No longer used */
++ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ /* The following apply to all object types except for hard links */
++ u32 yst_mode; /* protection */
++
++ u32 yst_uid;
++ u32 yst_gid;
++ u32 yst_atime;
++ u32 yst_mtime;
++ u32 yst_ctime;
++
++ /* File size applies to files only */
++ u32 file_size_low;
++
++ /* Equivalent object id applies to hard links only. */
++ int equiv_id;
++
++ /* Alias is for symlinks only. */
++ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
++
++ u32 yst_rdev; /* stuff for block and char devices (major/min) */
++
++ u32 win_ctime[2];
++ u32 win_atime[2];
++ u32 win_mtime[2];
++
++ u32 inband_shadowed_obj_id;
++ u32 inband_is_shrink;
++
++ u32 file_size_high;
++ u32 reserved[1];
++ int shadows_obj; /* This object header shadows the
++ specified object if > 0 */
++
++	/* is_shrink applies to object headers written when we make a hole. */
++ u32 is_shrink;
++
++};
++
++/*--------------------------- Tnode -------------------------- */
++
++struct yaffs_tnode {
++ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
++};
++
++/*------------------------ Object -----------------------------*/
++/* An object can be one of:
++ * - a directory (no data, has children links)
++ * - a regular file (data.... not prunes :->).
++ * - a symlink [symbolic link] (the alias).
++ * - a hard link
++ */
++
++struct yaffs_file_var {
++ loff_t file_size;
++ loff_t scanned_size;
++ loff_t shrink_size;
++ int top_level;
++ struct yaffs_tnode *top;
++};
++
++struct yaffs_dir_var {
++ struct list_head children; /* list of child links */
++ struct list_head dirty; /* Entry for list of dirty directories */
++};
++
++struct yaffs_symlink_var {
++ YCHAR *alias;
++};
++
++struct yaffs_hardlink_var {
++ struct yaffs_obj *equiv_obj;
++ u32 equiv_id;
++};
++
++union yaffs_obj_var {
++ struct yaffs_file_var file_variant;
++ struct yaffs_dir_var dir_variant;
++ struct yaffs_symlink_var symlink_variant;
++ struct yaffs_hardlink_var hardlink_variant;
++};
++
++struct yaffs_obj {
++ u8 deleted:1; /* This should only apply to unlinked files. */
++ u8 soft_del:1; /* it has also been soft deleted */
++ u8 unlinked:1; /* An unlinked file.*/
++ u8 fake:1; /* A fake object has no presence on NAND. */
++ u8 rename_allowed:1; /* Some objects cannot be renamed. */
++ u8 unlink_allowed:1;
++ u8 dirty:1; /* the object needs to be written to flash */
++ u8 valid:1; /* When the file system is being loaded up, this
++ * object might be created before the data
++ * is available
++ * ie. file data chunks encountered before
++ * the header.
++ */
++ u8 lazy_loaded:1; /* This object has been lazy loaded and
++ * is missing some detail */
++
++ u8 defered_free:1; /* Object is removed from NAND, but is
++ * still in the inode cache.
++				 * Free of object is deferred
++				 * until the inode is released.
++ */
++ u8 being_created:1; /* This object is still being created
++ * so skip some verification checks. */
++ u8 is_shadowed:1; /* This object is shadowed on the way
++ * to being renamed. */
++
++	u8 xattr_known:1;	/* We know if this object has xattribs
++ * or not. */
++ u8 has_xattr:1; /* This object has xattribs.
++ * Only valid if xattr_known. */
++
++ u8 serial; /* serial number of chunk in NAND.*/
++ u16 sum; /* sum of the name to speed searching */
++
++ struct yaffs_dev *my_dev; /* The device I'm on */
++
++ struct list_head hash_link; /* list of objects in hash bucket */
++
++ struct list_head hard_links; /* hard linked object chain*/
++
++ /* directory structure stuff */
++ /* also used for linking up the free list */
++ struct yaffs_obj *parent;
++ struct list_head siblings;
++
++ /* Where's my object header in NAND? */
++ int hdr_chunk;
++
++ int n_data_chunks; /* Number of data chunks for this file. */
++
++ u32 obj_id; /* the object id value */
++
++ u32 yst_mode;
++
++ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
++
++#ifdef CONFIG_YAFFS_WINCE
++ u32 win_ctime[2];
++ u32 win_mtime[2];
++ u32 win_atime[2];
++#else
++ u32 yst_uid;
++ u32 yst_gid;
++ u32 yst_atime;
++ u32 yst_mtime;
++ u32 yst_ctime;
++#endif
++
++ u32 yst_rdev;
++
++ void *my_inode;
++
++ enum yaffs_obj_type variant_type;
++
++ union yaffs_obj_var variant;
++
++};
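/*
 * Editorial sketch, not part of the patch: variant_type selects which member
 * of the variant union above is meaningful, so callers typically check it
 * before touching the union, e.g.
 *
 *	if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
 *		size = obj->variant.file_variant.file_size;
 *	else if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
 *		alias = obj->variant.symlink_variant.alias;
 */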
++
++struct yaffs_obj_bucket {
++ struct list_head list;
++ int count;
++};
++
++/* yaffs_checkpt_obj holds the definition of an object as dumped
++ * by checkpointing.
++ */
++
++struct yaffs_checkpt_obj {
++ int struct_type;
++ u32 obj_id;
++ u32 parent_id;
++ int hdr_chunk;
++ enum yaffs_obj_type variant_type:3;
++ u8 deleted:1;
++ u8 soft_del:1;
++ u8 unlinked:1;
++ u8 fake:1;
++ u8 rename_allowed:1;
++ u8 unlink_allowed:1;
++ u8 serial;
++ int n_data_chunks;
++ loff_t size_or_equiv_obj;
++};
++
++/*--------------------- Temporary buffers ----------------
++ *
++ * These are chunk-sized working buffers. Each device has a few.
++ */
++
++struct yaffs_buffer {
++ u8 *buffer;
++ int in_use;
++};
++
++/*----------------- Device ---------------------------------*/
++
++struct yaffs_param {
++ const YCHAR *name;
++
++ /*
++ * Entry parameters set up way early. Yaffs sets up the rest.
++ * The structure should be zeroed out before use so that unused
++ * and default values are zero.
++ */
++
++	int inband_tags;	/* Use inband tags */
++ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
++ be a power of 2 */
++ int chunks_per_block; /* does not need to be a power of 2 */
++ int spare_bytes_per_chunk; /* spare area size */
++ int start_block; /* Start block we're allowed to use */
++ int end_block; /* End block we're allowed to use */
++ int n_reserved_blocks; /* Tuneable so that we can reduce
++ * reserved blocks on NOR and RAM. */
++
++ int n_caches; /* If <= 0, then short op caching is disabled,
++ * else the number of short op caches.
++ */
++ int cache_bypass_aligned; /* If non-zero then bypass the cache for
++ * aligned writes.
++ */
++
++ int use_nand_ecc; /* Flag to decide whether or not to use
++ * NAND driver ECC on data (yaffs1) */
++ int tags_9bytes; /* Use 9 byte tags */
++ int no_tags_ecc; /* Flag to decide whether or not to do ECC
++ * on packed tags (yaffs2) */
++
++ int is_yaffs2; /* Use yaffs2 mode on this device */
++
++ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
++
++ int refresh_period; /* How often to check for a block refresh */
++
++ /* Checkpoint control. Can be set before or after initialisation */
++ u8 skip_checkpt_rd;
++ u8 skip_checkpt_wr;
++
++ int enable_xattr; /* Enable xattribs */
++
++ int max_objects; /*
++ * Set to limit the number of objects created.
++ * 0 = no limit.
++ */
++
++ /* The remove_obj_fn function must be supplied by OS flavours that
++ * need it.
++ * yaffs direct uses it to implement the faster readdir.
++ * Linux uses it to protect the directory during unlocking.
++ */
++ void (*remove_obj_fn) (struct yaffs_obj *obj);
++
++ /* Callback to mark the superblock dirty */
++ void (*sb_dirty_fn) (struct yaffs_dev *dev);
++
++ /* Callback to control garbage collection. */
++ unsigned (*gc_control_fn) (struct yaffs_dev *dev);
++
++ /* Debug control flags. Don't use unless you know what you're doing */
++ int use_header_file_size; /* Flag to determine if we should use
++ * file sizes from the header */
++ int disable_lazy_load; /* Disable lazy loading on this device */
++ int wide_tnodes_disabled; /* Set to disable wide tnodes */
++ int disable_soft_del; /* yaffs 1 only: Set to disable the use of
++ * softdeletion. */
++
++ int defered_dir_update; /* Set to defer directory updates */
++
++#ifdef CONFIG_YAFFS_AUTO_UNICODE
++ int auto_unicode;
++#endif
++ int always_check_erased; /* Force chunk erased check always on */
++
++ int disable_summary;
++ int disable_bad_block_marking;
++
++};
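/*
 * Editorial sketch, not part of the patch: per the comment above, the
 * parameter block is zeroed and only the entry parameters are filled in
 * before yaffs_guts_initialise() is called. The helper name and the values
 * below are illustrative only, for a hypothetical 2KiB-page NAND with 64
 * chunks per block, and assume a non-unicode build where YCHAR is plain char.
 */
static void example_setup_yaffs2_params(struct yaffs_dev *dev)
{
	memset(&dev->param, 0, sizeof(dev->param));
	dev->param.name = "example-nand";
	dev->param.total_bytes_per_chunk = 2048;
	dev->param.chunks_per_block = 64;
	dev->param.spare_bytes_per_chunk = 64;
	dev->param.start_block = 0;
	dev->param.end_block = 1023;
	dev->param.n_reserved_blocks = 5;
	dev->param.n_caches = 10;
	dev->param.inband_tags = 0;
	dev->param.is_yaffs2 = 1;
}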
++
++struct yaffs_driver {
++ int (*drv_write_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
++ const u8 *data, int data_len,
++ const u8 *oob, int oob_len);
++ int (*drv_read_chunk_fn) (struct yaffs_dev *dev, int nand_chunk,
++ u8 *data, int data_len,
++ u8 *oob, int oob_len,
++ enum yaffs_ecc_result *ecc_result);
++ int (*drv_erase_fn) (struct yaffs_dev *dev, int block_no);
++ int (*drv_mark_bad_fn) (struct yaffs_dev *dev, int block_no);
++ int (*drv_check_bad_fn) (struct yaffs_dev *dev, int block_no);
++ int (*drv_initialise_fn) (struct yaffs_dev *dev);
++ int (*drv_deinitialise_fn) (struct yaffs_dev *dev);
++};
++
++struct yaffs_tags_handler {
++ int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
++ int nand_chunk, const u8 *data,
++ const struct yaffs_ext_tags *tags);
++ int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
++ int nand_chunk, u8 *data,
++ struct yaffs_ext_tags *tags);
++
++ int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number);
++ int (*mark_bad_fn) (struct yaffs_dev *dev, int block_no);
++};
++
++struct yaffs_dev {
++ struct yaffs_param param;
++ struct yaffs_driver drv;
++ struct yaffs_tags_handler tagger;
++
++ /* Context storage. Holds extra OS specific data for this device */
++
++ void *os_context;
++ void *driver_context;
++
++ struct list_head dev_list;
++
++ int ll_init;
++ /* Runtime parameters. Set up by YAFFS. */
++ int data_bytes_per_chunk;
++
++ /* Non-wide tnode stuff */
++ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
++ * the tnodes are not wide enough.
++ */
++ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
++
++ /* Stuff to support wide tnodes */
++ u32 tnode_width;
++ u32 tnode_mask;
++ u32 tnode_size;
++
++ /* Stuff for figuring out file offset to chunk conversions */
++ u32 chunk_shift; /* Shift value */
++ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
++ u32 chunk_mask; /* Mask to use for power-of-2 case */
++
++ int is_mounted;
++ int read_only;
++ int is_checkpointed;
++
++ /* Stuff to support block offsetting to support start block zero */
++ int internal_start_block;
++ int internal_end_block;
++ int block_offset;
++ int chunk_offset;
++
++ /* Runtime checkpointing stuff */
++ int checkpt_page_seq; /* running sequence number of checkpt pages */
++ int checkpt_byte_count;
++ int checkpt_byte_offs;
++ u8 *checkpt_buffer;
++ int checkpt_open_write;
++ int blocks_in_checkpt;
++ int checkpt_cur_chunk;
++ int checkpt_cur_block;
++ int checkpt_next_block;
++ int *checkpt_block_list;
++ int checkpt_max_blocks;
++ u32 checkpt_sum;
++ u32 checkpt_xor;
++
++ int checkpoint_blocks_required; /* Number of blocks needed to store
++ * current checkpoint set */
++
++ /* Block Info */
++ struct yaffs_block_info *block_info;
++ u8 *chunk_bits; /* bitmap of chunks in use */
++ u8 block_info_alt:1; /* allocated using alternative alloc */
++ u8 chunk_bits_alt:1; /* allocated using alternative alloc */
++ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
++ * Must be consistent with chunks_per_block.
++ */
++
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ u32 alloc_page;
++ int alloc_block_finder; /* Used to search for next allocation block */
++
++ /* Object and Tnode memory management */
++ void *allocator;
++ int n_obj;
++ int n_tnodes;
++
++ int n_hardlinks;
++
++ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
++ u32 bucket_finder;
++
++ int n_free_chunks;
++
++ /* Garbage collection control */
++ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
++ u32 n_clean_ups;
++
++ unsigned has_pending_prioritised_gc; /* We think this device might
++ have pending prioritised gcs */
++ unsigned gc_disable;
++ unsigned gc_block_finder;
++ unsigned gc_dirtiest;
++ unsigned gc_pages_in_use;
++ unsigned gc_not_done;
++ unsigned gc_block;
++ unsigned gc_chunk;
++ unsigned gc_skip;
++ struct yaffs_summary_tags *gc_sum_tags;
++
++ /* Special directories */
++ struct yaffs_obj *root_dir;
++ struct yaffs_obj *lost_n_found;
++
++ int buffered_block; /* Which block is buffered here? */
++ int doing_buffered_block_rewrite;
++
++ struct yaffs_cache *cache;
++ int cache_last_use;
++
++ /* Stuff for background deletion and unlinked files. */
++ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
++ files live. */
++ struct yaffs_obj *del_dir; /* Directory where deleted objects are
++ sent to disappear. */
++ struct yaffs_obj *unlinked_deletion; /* Current file being
++ background deleted. */
++ int n_deleted_files; /* Count of files awaiting deletion; */
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
++
++ /* Temporary buffer management */
++ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
++ int max_temp;
++ int temp_in_use;
++ int unmanaged_buffer_allocs;
++ int unmanaged_buffer_deallocs;
++
++ /* yaffs2 runtime stuff */
++ unsigned seq_number; /* Sequence number of currently
++ allocating block */
++ unsigned oldest_dirty_seq;
++ unsigned oldest_dirty_block;
++
++ /* Block refreshing */
++ int refresh_skip; /* A skip down counter.
++ * Refresh happens when this gets to zero. */
++
++ /* Dirty directory handling */
++ struct list_head dirty_dirs; /* List of dirty directories */
++
++ /* Summary */
++ int chunks_per_summary;
++ struct yaffs_summary_tags *sum_tags;
++
++ /* Statistics */
++ u32 n_page_writes;
++ u32 n_page_reads;
++ u32 n_erasures;
++ u32 n_bad_queries;
++ u32 n_bad_markings;
++ u32 n_erase_failures;
++ u32 n_gc_copies;
++ u32 all_gcs;
++ u32 passive_gc_count;
++ u32 oldest_dirty_gc_count;
++ u32 n_gc_blocks;
++ u32 bg_gcs;
++ u32 n_retried_writes;
++ u32 n_retired_blocks;
++ u32 n_ecc_fixed;
++ u32 n_ecc_unfixed;
++ u32 n_tags_ecc_fixed;
++ u32 n_tags_ecc_unfixed;
++ u32 n_deletions;
++ u32 n_unmarked_deletions;
++ u32 refresh_count;
++ u32 cache_hits;
++ u32 tags_used;
++ u32 summary_used;
++
++};
++
++/* The CheckpointDevice structure holds the device information that changes
++ * at runtime and must be preserved over unmount/mount cycles.
++ */
++struct yaffs_checkpt_dev {
++ int struct_type;
++ int n_erased_blocks;
++ int alloc_block; /* Current block being allocated off */
++ u32 alloc_page;
++ int n_free_chunks;
++
++ int n_deleted_files; /* Count of files awaiting deletion; */
++ int n_unlinked_files; /* Count of unlinked files. */
++ int n_bg_deletions; /* Count of background deletions. */
++
++ /* yaffs2 runtime stuff */
++ unsigned seq_number; /* Sequence number of currently
++ * allocating block */
++
++};
++
++struct yaffs_checkpt_validity {
++ int struct_type;
++ u32 magic;
++ u32 version;
++ u32 head;
++};
++
++struct yaffs_shadow_fixer {
++ int obj_id;
++ int shadowed_id;
++ struct yaffs_shadow_fixer *next;
++};
++
++/* Structure for doing xattr modifications */
++struct yaffs_xattr_mod {
++ int set; /* If 0 then this is a deletion */
++ const YCHAR *name;
++ const void *data;
++ int size;
++ int flags;
++ int result;
++};
++
++/*----------------------- YAFFS Functions -----------------------*/
++
++int yaffs_guts_initialise(struct yaffs_dev *dev);
++void yaffs_deinitialise(struct yaffs_dev *dev);
++
++int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
++
++int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
++ struct yaffs_obj *new_dir, const YCHAR * new_name);
++
++int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
++int yaffs_del_obj(struct yaffs_obj *obj);
++struct yaffs_obj *yaffs_retype_obj(struct yaffs_obj *obj,
++ enum yaffs_obj_type type);
++
++
++int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
++loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
++int yaffs_get_obj_inode(struct yaffs_obj *obj);
++unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
++int yaffs_get_obj_link_count(struct yaffs_obj *obj);
++
++/* File operations */
++int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
++ int n_bytes);
++int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
++		 int n_bytes, int write_through);
++int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
++
++struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid);
++
++int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
++
++/* Flushing and checkpointing */
++void yaffs_flush_whole_cache(struct yaffs_dev *dev);
++
++int yaffs_checkpoint_save(struct yaffs_dev *dev);
++int yaffs_checkpoint_restore(struct yaffs_dev *dev);
++
++/* Directory operations */
++struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
++ u32 mode, u32 uid, u32 gid);
++struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
++ const YCHAR *name);
++struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
++
++/* Link operations */
++struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
++ struct yaffs_obj *equiv_obj);
++
++struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
++
++/* Symlink operations */
++struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, const YCHAR *alias);
++YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
++
++/* Special inodes (fifos, sockets and devices) */
++struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
++ const YCHAR *name, u32 mode, u32 uid,
++ u32 gid, u32 rdev);
++
++int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
++ const void *value, int size, int flags);
++int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
++ int size);
++int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
++int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
++
++/* Special directories */
++struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
++struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
++
++void yaffs_handle_defered_free(struct yaffs_obj *obj);
++
++void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
++
++int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
++
++/* Debug dump */
++int yaffs_dump_obj(struct yaffs_obj *obj);
++
++void yaffs_guts_test(struct yaffs_dev *dev);
++int yaffs_guts_ll_init(struct yaffs_dev *dev);
++
++
++/* A few useful functions to be used within the core files*/
++void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
++ int lyn);
++int yaffs_check_ff(u8 *buffer, int n_bytes);
++void yaffs_handle_chunk_error(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi);
++
++u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
++void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
++
++struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
++ int number,
++ enum yaffs_obj_type type);
++int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ int nand_chunk, int in_scan);
++void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
++void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
++ const struct yaffs_obj_hdr *oh);
++void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
++YCHAR *yaffs_clone_str(const YCHAR *str);
++void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
++void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
++int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
++ int force, int is_shrink, int shadows,
++ struct yaffs_xattr_mod *xop);
++void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
++ int backward_scanning);
++int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
++struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
++struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id,
++ struct yaffs_tnode *passed_tn);
++
++int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
++		 int n_bytes, int write_through);
++void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
++void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
++
++int yaffs_count_free_chunks(struct yaffs_dev *dev);
++
++struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
++ struct yaffs_file_var *file_struct,
++ u32 chunk_id);
++
++u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
++ unsigned pos);
++
++int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
++
++int yaffs_guts_format_dev(struct yaffs_dev *dev);
++
++void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
++ int *chunk_out, u32 *offset_out);
++/*
++ * Marshalling functions to get loff_t file sizes into and out of
++ * object headers.
++ */
++void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
++loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
++loff_t yaffs_max_file_size(struct yaffs_dev *dev);
++
++/*
++ * Debug function to count number of blocks in each state
++ * NB Needs to be called with correct number of integers
++ */
++
++void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10]);
++
++int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
++ struct yaffs_ext_tags *tags);
++
++#endif
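/*
 * Editorial sketch, not part of the patch: yaffs_count_blocks_by_state()
 * above must be passed one integer slot per block state. Sizing the array
 * from YAFFS_NUMBER_OF_BLOCK_STATES keeps the caller in step with the enum.
 * The helper name is hypothetical and the use of yaffs_trace() assumes
 * yaffs_trace.h is included.
 */
static void example_dump_block_states(struct yaffs_dev *dev)
{
	int bs[YAFFS_NUMBER_OF_BLOCK_STATES];
	int i;

	yaffs_count_blocks_by_state(dev, bs);
	for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
		yaffs_trace(YAFFS_TRACE_ALWAYS,
			    "block state %d: %d blocks", i, bs[i]);
}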
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_linux.h linux-3.14.4/fs/yaffs2/yaffs_linux.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_linux.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_linux.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,48 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_LINUX_H__
++#define __YAFFS_LINUX_H__
++
++#include "yportenv.h"
++
++struct yaffs_linux_context {
++ struct list_head context_list; /* List of these we have mounted */
++ struct yaffs_dev *dev;
++ struct super_block *super;
++ struct task_struct *bg_thread; /* Background thread for this device */
++ int bg_running;
++ struct mutex gross_lock; /* Gross locking mutex*/
++ u8 *spare_buffer; /* For mtdif2 use. Don't know the buffer size
++ * at compile time so we have to allocate it.
++ */
++ struct list_head search_contexts;
++ struct task_struct *readdir_process;
++ unsigned mount_id;
++ int dirty;
++};
++
++#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
++#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++#define WRITE_SIZE_STR "writesize"
++#define WRITE_SIZE(mtd) ((mtd)->writesize)
++#else
++#define WRITE_SIZE_STR "oobblock"
++#define WRITE_SIZE(mtd) ((mtd)->oobblock)
++#endif
++
++#endif
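/*
 * Editorial sketch, not part of the patch: the two accessor macros above
 * simply cast the opaque os_context and driver_context pointers carried in
 * struct yaffs_dev, e.g.
 *
 *	struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
 *	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
 *
 *	mutex_lock(&lc->gross_lock);
 *	... filesystem operation ...
 *	mutex_unlock(&lc->gross_lock);
 */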
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_mtdif.c linux-3.14.4/fs/yaffs2/yaffs_mtdif.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_mtdif.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_mtdif.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,309 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yportenv.h"
++
++#include "yaffs_mtdif.h"
++
++#include "linux/mtd/mtd.h"
++#include "linux/types.h"
++#include "linux/time.h"
++#include "linux/major.h"
++#include "linux/mtd/nand.h"
++#include "linux/kernel.h"
++#include "linux/version.h"
++#include "linux/types.h"
++
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++#include "yaffs_linux.h"
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
++#define MTD_OPS_AUTO_OOB MTD_OOB_AUTO
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
++#define mtd_erase(m, ei) (m)->erase(m, ei)
++#define mtd_write_oob(m, addr, pops) (m)->write_oob(m, addr, pops)
++#define mtd_read_oob(m, addr, pops) (m)->read_oob(m, addr, pops)
++#define mtd_block_isbad(m, offs) (m)->block_isbad(m, offs)
++#define mtd_block_markbad(m, offs) (m)->block_markbad(m, offs)
++#endif
++
++
++
++int nandmtd_erase_block(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ u32 addr =
++ ((loff_t) block_no) * dev->param.total_bytes_per_chunk *
++ dev->param.chunks_per_block;
++ struct erase_info ei;
++ int retval = 0;
++
++ ei.mtd = mtd;
++ ei.addr = addr;
++ ei.len = dev->param.total_bytes_per_chunk * dev->param.chunks_per_block;
++ ei.time = 1000;
++ ei.retries = 2;
++ ei.callback = NULL;
++ ei.priv = (u_long) dev;
++
++ retval = mtd_erase(mtd, &ei);
++
++ if (retval == 0)
++ return YAFFS_OK;
++
++ return YAFFS_FAIL;
++}
++
++
++static int yaffs_mtd_write(struct yaffs_dev *dev, int nand_chunk,
++ const u8 *data, int data_len,
++ const u8 *oob, int oob_len)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ loff_t addr;
++ struct mtd_oob_ops ops;
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "yaffs_mtd_write(%p, %d, %p, %d, %p, %d)\n",
++ dev, nand_chunk, data, data_len, oob, oob_len);
++
++ if (!data || !data_len) {
++ data = NULL;
++ data_len = 0;
++ }
++
++ if (!oob || !oob_len) {
++ oob = NULL;
++ oob_len = 0;
++ }
++
++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
++ memset(&ops, 0, sizeof(ops));
++ ops.mode = MTD_OPS_AUTO_OOB;
++ ops.len = (data) ? data_len : 0;
++ ops.ooblen = oob_len;
++ ops.datbuf = (u8 *)data;
++ ops.oobbuf = (u8 *)oob;
++
++ retval = mtd_write_oob(mtd, addr, &ops);
++ if (retval) {
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "write_oob failed, chunk %d, mtd error %d",
++ nand_chunk, retval);
++ }
++ return retval ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_read(struct yaffs_dev *dev, int nand_chunk,
++ u8 *data, int data_len,
++ u8 *oob, int oob_len,
++ enum yaffs_ecc_result *ecc_result)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ loff_t addr;
++ struct mtd_oob_ops ops;
++ int retval;
++
++ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
++ memset(&ops, 0, sizeof(ops));
++ ops.mode = MTD_OPS_AUTO_OOB;
++ ops.len = (data) ? data_len : 0;
++ ops.ooblen = oob_len;
++ ops.datbuf = data;
++ ops.oobbuf = oob;
++
++#if (MTD_VERSION_CODE < MTD_VERSION(2, 6, 20))
++ /* In MTD 2.6.18 to 2.6.19 nand_base.c:nand_do_read_oob() has a bug;
++ * help it out with ops.len = ops.ooblen when ops.datbuf == NULL.
++ */
++ ops.len = (ops.datbuf) ? ops.len : ops.ooblen;
++#endif
++ /* Read page and oob using MTD.
++ * Check status and determine ECC result.
++ */
++ retval = mtd_read_oob(mtd, addr, &ops);
++ if (retval)
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "read_oob failed, chunk %d, mtd error %d",
++ nand_chunk, retval);
++
++ switch (retval) {
++ case 0:
++ /* no error */
++ if(ecc_result)
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ break;
++
++ case -EUCLEAN:
++ /* MTD's ECC fixed the data */
++ if(ecc_result)
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
++ dev->n_ecc_fixed++;
++ break;
++
++ case -EBADMSG:
++ default:
++ /* MTD's ECC could not fix the data */
++ dev->n_ecc_unfixed++;
++ if(ecc_result)
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ return YAFFS_FAIL;
++ }
++
++ return YAFFS_OK;
++}
++
++static int yaffs_mtd_erase(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++
++ loff_t addr;
++ struct erase_info ei;
++ int retval = 0;
++ u32 block_size;
++
++ block_size = dev->param.total_bytes_per_chunk *
++ dev->param.chunks_per_block;
++ addr = ((loff_t) block_no) * block_size;
++
++ ei.mtd = mtd;
++ ei.addr = addr;
++ ei.len = block_size;
++ ei.time = 1000;
++ ei.retries = 2;
++ ei.callback = NULL;
++ ei.priv = (u_long) dev;
++
++ retval = mtd_erase(mtd, &ei);
++
++ if (retval == 0)
++ return YAFFS_OK;
++
++ return YAFFS_FAIL;
++}
++
++static int yaffs_mtd_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS, "marking block %d bad", block_no);
++
++ retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
++ return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_check_bad(struct yaffs_dev *dev, int block_no)
++{
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++ int blocksize = dev->param.chunks_per_block * dev->param.total_bytes_per_chunk;
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_MTD, "checking block %d bad", block_no);
++
++ retval = mtd_block_isbad(mtd, (loff_t) blocksize * block_no);
++ return (retval) ? YAFFS_FAIL : YAFFS_OK;
++}
++
++static int yaffs_mtd_initialise(struct yaffs_dev *dev)
++{
++ return YAFFS_OK;
++}
++
++static int yaffs_mtd_deinitialise(struct yaffs_dev *dev)
++{
++ return YAFFS_OK;
++}
++
++
++void yaffs_mtd_drv_install(struct yaffs_dev *dev)
++{
++ struct yaffs_driver *drv = &dev->drv;
++
++ drv->drv_write_chunk_fn = yaffs_mtd_write;
++ drv->drv_read_chunk_fn = yaffs_mtd_read;
++ drv->drv_erase_fn = yaffs_mtd_erase;
++ drv->drv_mark_bad_fn = yaffs_mtd_mark_bad;
++ drv->drv_check_bad_fn = yaffs_mtd_check_bad;
++ drv->drv_initialise_fn = yaffs_mtd_initialise;
++ drv->drv_deinitialise_fn = yaffs_mtd_deinitialise;
++}
++
++
++struct mtd_info * yaffs_get_mtd_device(dev_t sdev)
++{
++ struct mtd_info *mtd;
++
++	mtd = get_mtd_device(NULL, MINOR(sdev));
++
++ /* Check it's an mtd device..... */
++ if (MAJOR(sdev) != MTD_BLOCK_MAJOR)
++ return NULL; /* This isn't an mtd device */
++
++ /* Check it's NAND */
++ if (mtd->type != MTD_NANDFLASH) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: MTD device is not NAND it's type %d",
++ mtd->type);
++ return NULL;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS, " %s %d", WRITE_SIZE_STR, WRITE_SIZE(mtd));
++ yaffs_trace(YAFFS_TRACE_OS, " oobsize %d", mtd->oobsize);
++ yaffs_trace(YAFFS_TRACE_OS, " erasesize %d", mtd->erasesize);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++ yaffs_trace(YAFFS_TRACE_OS, " size %u", mtd->size);
++#else
++ yaffs_trace(YAFFS_TRACE_OS, " size %lld", mtd->size);
++#endif
++
++ return mtd;
++}
++
++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags)
++{
++ if (yaffs_version == 2) {
++ if ((WRITE_SIZE(mtd) < YAFFS_MIN_YAFFS2_CHUNK_SIZE ||
++ mtd->oobsize < YAFFS_MIN_YAFFS2_SPARE_SIZE) &&
++ !inband_tags) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "MTD device does not have the right page sizes"
++ );
++ return -1;
++ }
++ } else {
++ if (WRITE_SIZE(mtd) < YAFFS_BYTES_PER_CHUNK ||
++ mtd->oobsize != YAFFS_BYTES_PER_SPARE) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++				"MTD device does not have the right page sizes"
++ );
++ return -1;
++ }
++ }
++
++ return 0;
++}
++
++
++void yaffs_put_mtd_device(struct mtd_info *mtd)
++{
++ if(mtd)
++ put_mtd_device(mtd);
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_mtdif.h linux-3.14.4/fs/yaffs2/yaffs_mtdif.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_mtdif.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_mtdif.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,25 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_MTDIF_H__
++#define __YAFFS_MTDIF_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_mtd_drv_install(struct yaffs_dev *dev);
++struct mtd_info * yaffs_get_mtd_device(dev_t sdev);
++void yaffs_put_mtd_device(struct mtd_info *mtd);
++int yaffs_verify_mtd(struct mtd_info *mtd, int yaffs_version, int inband_tags);
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_nameval.c linux-3.14.4/fs/yaffs2/yaffs_nameval.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_nameval.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_nameval.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,208 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ * This simple implementation of a name-value store assumes a small number of
++ * values and fits into a small finite buffer.
++ *
++ * Each attribute is stored as a record:
++ * sizeof(int) bytes record size.
++ * strnlen+1 bytes name null terminated.
++ * nbytes value.
++ * ----------
++ * total size stored in record size
++ *
++ * This code has not been tested with unicode yet.
++ */
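/*
 * Editorial sketch, not part of the patch: with the record layout described
 * above, storing the attribute "user.foo" = "bar" (3 bytes) in a zeroed
 * buffer consumes 4 + 8 + 1 + 3 = 16 bytes: a 4-byte record size holding 16,
 * "user.foo" plus its terminating NUL, then the 3 value bytes. Assuming a
 * non-unicode build where YCHAR is plain char:
 *
 *	char xb[128] = { 0 };
 *	char val[8];
 *
 *	nval_set(xb, sizeof(xb), "user.foo", "bar", 3, 0);
 *	nval_get(xb, sizeof(xb), "user.foo", val, sizeof(val));  returns 3
 */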
++
++#include "yaffs_nameval.h"
++
++#include "yportenv.h"
++
++static int nval_find(const char *xb, int xb_size, const YCHAR *name,
++ int *exist_size)
++{
++ int pos = 0;
++ int size;
++
++ memcpy(&size, xb, sizeof(int));
++ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
++ if (!strncmp((YCHAR *) (xb + pos + sizeof(int)),
++ name, size)) {
++ if (exist_size)
++ *exist_size = size;
++ return pos;
++ }
++ pos += size;
++ if (pos < xb_size - sizeof(int))
++ memcpy(&size, xb + pos, sizeof(int));
++ else
++ size = 0;
++ }
++ if (exist_size)
++ *exist_size = 0;
++ return -ENODATA;
++}
++
++static int nval_used(const char *xb, int xb_size)
++{
++ int pos = 0;
++ int size;
++
++ memcpy(&size, xb + pos, sizeof(int));
++ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
++ pos += size;
++ if (pos < xb_size - sizeof(int))
++ memcpy(&size, xb + pos, sizeof(int));
++ else
++ size = 0;
++ }
++ return pos;
++}
++
++int nval_del(char *xb, int xb_size, const YCHAR *name)
++{
++ int pos = nval_find(xb, xb_size, name, NULL);
++ int size;
++
++ if (pos < 0 || pos >= xb_size)
++ return -ENODATA;
++
++ /* Find size, shift rest over this record,
++ * then zero out the rest of buffer */
++ memcpy(&size, xb + pos, sizeof(int));
++ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
++ memset(xb + (xb_size - size), 0, size);
++ return 0;
++}
++
++int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
++ int bsize, int flags)
++{
++ int pos;
++ int namelen = strnlen(name, xb_size);
++ int reclen;
++ int size_exist = 0;
++ int space;
++ int start;
++
++ pos = nval_find(xb, xb_size, name, &size_exist);
++
++ if (flags & XATTR_CREATE && pos >= 0)
++ return -EEXIST;
++ if (flags & XATTR_REPLACE && pos < 0)
++ return -ENODATA;
++
++ start = nval_used(xb, xb_size);
++ space = xb_size - start + size_exist;
++
++ reclen = (sizeof(int) + namelen + 1 + bsize);
++
++ if (reclen > space)
++ return -ENOSPC;
++
++ if (pos >= 0) {
++ nval_del(xb, xb_size, name);
++ start = nval_used(xb, xb_size);
++ }
++
++ pos = start;
++
++ memcpy(xb + pos, &reclen, sizeof(int));
++ pos += sizeof(int);
++ strncpy((YCHAR *) (xb + pos), name, reclen);
++ pos += (namelen + 1);
++ memcpy(xb + pos, buf, bsize);
++ return 0;
++}
++
++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
++ int bsize)
++{
++ int pos = nval_find(xb, xb_size, name, NULL);
++ int size;
++
++ if (pos >= 0 && pos < xb_size) {
++
++ memcpy(&size, xb + pos, sizeof(int));
++ pos += sizeof(int); /* advance past record length */
++ size -= sizeof(int);
++
++ /* Advance over name string */
++ while (xb[pos] && size > 0 && pos < xb_size) {
++ pos++;
++ size--;
++ }
++ /*Advance over NUL */
++ pos++;
++ size--;
++
++ /* If bsize is zero then this is a size query.
++ * Return the size, but don't copy.
++ */
++ if (!bsize)
++ return size;
++
++ if (size <= bsize) {
++ memcpy(buf, xb + pos, size);
++ return size;
++ }
++ }
++ if (pos >= 0)
++ return -ERANGE;
++
++ return -ENODATA;
++}
++
++int nval_list(const char *xb, int xb_size, char *buf, int bsize)
++{
++ int pos = 0;
++ int size;
++ int name_len;
++ int ncopied = 0;
++ int filled = 0;
++
++ memcpy(&size, xb + pos, sizeof(int));
++ while (size > sizeof(int) &&
++ size <= xb_size &&
++ (pos + size) < xb_size &&
++ !filled) {
++ pos += sizeof(int);
++ size -= sizeof(int);
++ name_len = strnlen((YCHAR *) (xb + pos), size);
++ if (ncopied + name_len + 1 < bsize) {
++ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
++ buf += name_len;
++ *buf = '\0';
++ buf++;
++ if (sizeof(YCHAR) > 1) {
++ *buf = '\0';
++ buf++;
++ }
++ ncopied += (name_len + 1);
++ } else {
++ filled = 1;
++ }
++ pos += size;
++ if (pos < xb_size - sizeof(int))
++ memcpy(&size, xb + pos, sizeof(int));
++ else
++ size = 0;
++ }
++ return ncopied;
++}
++
++int nval_hasvalues(const char *xb, int xb_size)
++{
++ return nval_used(xb, xb_size) > 0;
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_nameval.h linux-3.14.4/fs/yaffs2/yaffs_nameval.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_nameval.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_nameval.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,28 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __NAMEVAL_H__
++#define __NAMEVAL_H__
++
++#include "yportenv.h"
++
++int nval_del(char *xb, int xb_size, const YCHAR * name);
++int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
++ int bsize, int flags);
++int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
++ int bsize);
++int nval_list(const char *xb, int xb_size, char *buf, int bsize);
++int nval_hasvalues(const char *xb, int xb_size);
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_nand.c linux-3.14.4/fs/yaffs2/yaffs_nand.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_nand.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_nand.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,122 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_nand.h"
++#include "yaffs_tagscompat.h"
++
++#include "yaffs_getblockinfo.h"
++#include "yaffs_summary.h"
++
++static int apply_chunk_offset(struct yaffs_dev *dev, int chunk)
++{
++ return chunk - dev->chunk_offset;
++}
++
++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
++ u8 *buffer, struct yaffs_ext_tags *tags)
++{
++ int result;
++ struct yaffs_ext_tags local_tags;
++ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
++
++ dev->n_page_reads++;
++
++ /* If there are no tags provided use local tags. */
++ if (!tags)
++ tags = &local_tags;
++
++ result = dev->tagger.read_chunk_tags_fn(dev, flash_chunk, buffer, tags);
++ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
++
++ struct yaffs_block_info *bi;
++ bi = yaffs_get_block_info(dev,
++ nand_chunk /
++ dev->param.chunks_per_block);
++ yaffs_handle_chunk_error(dev, bi);
++ }
++ return result;
++}
++
++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *buffer, struct yaffs_ext_tags *tags)
++{
++ int result;
++ int flash_chunk = apply_chunk_offset(dev, nand_chunk);
++
++ dev->n_page_writes++;
++
++ if (!tags) {
++ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
++ BUG();
++ return YAFFS_FAIL;
++ }
++
++ tags->seq_number = dev->seq_number;
++ tags->chunk_used = 1;
++ yaffs_trace(YAFFS_TRACE_WRITE,
++ "Writing chunk %d tags %d %d",
++ nand_chunk, tags->obj_id, tags->chunk_id);
++
++ result = dev->tagger.write_chunk_tags_fn(dev, flash_chunk,
++ buffer, tags);
++
++ yaffs_summary_add(dev, tags, nand_chunk);
++
++ return result;
++}
++
++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++ block_no -= dev->block_offset;
++ dev->n_bad_markings++;
++
++ if (dev->param.disable_bad_block_marking)
++ return YAFFS_OK;
++
++ return dev->tagger.mark_bad_fn(dev, block_no);
++}
++
++
++int yaffs_query_init_block_state(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number)
++{
++ block_no -= dev->block_offset;
++ return dev->tagger.query_block_fn(dev, block_no, state, seq_number);
++}
++
++int yaffs_erase_block(struct yaffs_dev *dev, int block_no)
++{
++ int result;
++
++ block_no -= dev->block_offset;
++ dev->n_erasures++;
++ result = dev->drv.drv_erase_fn(dev, block_no);
++ return result;
++}
++
++int yaffs_init_nand(struct yaffs_dev *dev)
++{
++ if (dev->drv.drv_initialise_fn)
++ return dev->drv.drv_initialise_fn(dev);
++ return YAFFS_OK;
++}
++
++int yaffs_deinit_nand(struct yaffs_dev *dev)
++{
++ if (dev->drv.drv_deinitialise_fn)
++ return dev->drv.drv_deinitialise_fn(dev);
++ return YAFFS_OK;
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_nand.h linux-3.14.4/fs/yaffs2/yaffs_nand.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_nand.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_nand.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_NAND_H__
++#define __YAFFS_NAND_H__
++#include "yaffs_guts.h"
++
++int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
++ u8 *buffer, struct yaffs_ext_tags *tags);
++
++int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *buffer, struct yaffs_ext_tags *tags);
++
++int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
++
++int yaffs_query_init_block_state(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ unsigned *seq_number);
++
++int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
++
++int yaffs_init_nand(struct yaffs_dev *dev);
++int yaffs_deinit_nand(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags1.c linux-3.14.4/fs/yaffs2/yaffs_packedtags1.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags1.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_packedtags1.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,56 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags1.h"
++#include "yportenv.h"
++
++static const u8 all_ff[20] = {
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff,
++ 0xff, 0xff, 0xff, 0xff
++};
++
++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
++ const struct yaffs_ext_tags *t)
++{
++ pt->chunk_id = t->chunk_id;
++ pt->serial_number = t->serial_number;
++ pt->n_bytes = t->n_bytes;
++ pt->obj_id = t->obj_id;
++ pt->ecc = 0;
++ pt->deleted = (t->is_deleted) ? 0 : 1;
++ pt->unused_stuff = 0;
++ pt->should_be_ff = 0xffffffff;
++}
++
++void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
++ const struct yaffs_packed_tags1 *pt)
++{
++
++ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
++ t->block_bad = 0;
++ if (pt->should_be_ff != 0xffffffff)
++ t->block_bad = 1;
++ t->chunk_used = 1;
++ t->obj_id = pt->obj_id;
++ t->chunk_id = pt->chunk_id;
++ t->n_bytes = pt->n_bytes;
++ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ t->is_deleted = (pt->deleted) ? 0 : 1;
++ t->serial_number = pt->serial_number;
++ } else {
++ memset(t, 0, sizeof(struct yaffs_ext_tags));
++ }
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags1.h linux-3.14.4/fs/yaffs2/yaffs_packedtags1.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags1.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_packedtags1.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS1_H__
++#define __YAFFS_PACKEDTAGS1_H__
++
++#include "yaffs_guts.h"
++
++struct yaffs_packed_tags1 {
++ u32 chunk_id:20;
++ u32 serial_number:2;
++ u32 n_bytes:10;
++ u32 obj_id:18;
++ u32 ecc:12;
++ u32 deleted:1;
++ u32 unused_stuff:1;
++ unsigned should_be_ff;
++
++};
++
++void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
++ const struct yaffs_ext_tags *t);
++void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
++ const struct yaffs_packed_tags1 *pt);
++#endif
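/*
 * Editorial sketch, not part of the patch: the bitfields above are expected
 * to pack into three 32-bit words:
 *	word 0: chunk_id(20) + serial_number(2) + n_bytes(10) = 32 bits
 *	word 1: obj_id(18) + ecc(12) + deleted(1) + unused_stuff(1) = 32 bits
 *	word 2: should_be_ff
 * Bitfield layout is compiler-dependent, so a build-time check along these
 * lines (BUILD_BUG_ON from <linux/bug.h>) would catch packing surprises:
 *
 *	BUILD_BUG_ON(sizeof(struct yaffs_packed_tags1) != 3 * sizeof(u32));
 */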
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags2.c linux-3.14.4/fs/yaffs2/yaffs_packedtags2.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags2.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_packedtags2.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,197 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_packedtags2.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++
++/* This code packs a set of extended tags into a binary structure for
++ * NAND storage
++ */
++
++/* Some of the information is "extra" stuff which can be packed in to
++ * speed scanning.
++ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
++ */
++
++/* Extra flags applied to chunk_id */
++
++#define EXTRA_HEADER_INFO_FLAG 0x80000000
++#define EXTRA_SHRINK_FLAG 0x40000000
++#define EXTRA_SHADOWS_FLAG 0x20000000
++#define EXTRA_SPARE_FLAGS 0x10000000
++
++#define ALL_EXTRA_FLAGS 0xf0000000
++
++/* Also, the top 4 bits of the object Id are set to the object type. */
++#define EXTRA_OBJECT_TYPE_SHIFT (28)
++#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
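/*
 * Editorial sketch, not part of the patch: for an object-header chunk the
 * packing below folds these "extra" scanning hints into chunk_id and obj_id.
 * For example, a directory header for object 42 with parent object 7 that is
 * neither a shrink header nor a shadow would be stored as:
 *
 *	ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | 7;
 *	ptt->obj_id   = (YAFFS_OBJECT_TYPE_DIRECTORY << EXTRA_OBJECT_TYPE_SHIFT) | 42;
 *
 * and unpacking masks the flags back out with ALL_EXTRA_FLAGS and
 * EXTRA_OBJECT_TYPE_MASK.
 */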
++
++static void yaffs_dump_packed_tags2_tags_only(
++ const struct yaffs_packed_tags2_tags_only *ptt)
++{
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "packed tags obj %d chunk %d byte %d seq %d",
++ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
++}
++
++static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
++{
++ yaffs_dump_packed_tags2_tags_only(&pt->t);
++}
++
++static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
++{
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
++ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
++ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
++ t->seq_number);
++
++}
++
++static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
++{
++ if (t->chunk_id != 0 || !t->extra_available)
++ return 0;
++
++ /* Check if the file size is too long to store */
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
++ (t->extra_file_size >> 31) != 0)
++ return 0;
++ return 1;
++}
++
++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
++ const struct yaffs_ext_tags *t)
++{
++ ptt->chunk_id = t->chunk_id;
++ ptt->seq_number = t->seq_number;
++ ptt->n_bytes = t->n_bytes;
++ ptt->obj_id = t->obj_id;
++
++ /* Only store extra tags for object headers.
++	 * If it is a file then only store if the file size is short
++ * enough to fit.
++ */
++ if (yaffs_check_tags_extra_packable(t)) {
++ /* Store the extra header info instead */
++ /* We save the parent object in the chunk_id */
++ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
++ if (t->extra_is_shrink)
++ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
++ if (t->extra_shadows)
++ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
++
++ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
++
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ ptt->n_bytes = t->extra_equiv_id;
++ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++ ptt->n_bytes = (unsigned) t->extra_file_size;
++ else
++ ptt->n_bytes = 0;
++ }
++
++ yaffs_dump_packed_tags2_tags_only(ptt);
++ yaffs_dump_tags2(t);
++}
++
++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
++ const struct yaffs_ext_tags *t, int tags_ecc)
++{
++ yaffs_pack_tags2_tags_only(&pt->t, t);
++
++ if (tags_ecc)
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
++ sizeof(struct yaffs_packed_tags2_tags_only),
++ &pt->ecc);
++}
++
++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
++ struct yaffs_packed_tags2_tags_only *ptt)
++{
++ memset(t, 0, sizeof(struct yaffs_ext_tags));
++
++ if (ptt->seq_number == 0xffffffff)
++ return;
++
++ t->block_bad = 0;
++ t->chunk_used = 1;
++ t->obj_id = ptt->obj_id;
++ t->chunk_id = ptt->chunk_id;
++ t->n_bytes = ptt->n_bytes;
++ t->is_deleted = 0;
++ t->serial_number = 0;
++ t->seq_number = ptt->seq_number;
++
++ /* Do extra header info stuff */
++ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
++ t->chunk_id = 0;
++ t->n_bytes = 0;
++
++ t->extra_available = 1;
++ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
++ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
++ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
++ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
++ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
++
++ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ t->extra_equiv_id = ptt->n_bytes;
++ else
++ t->extra_file_size = ptt->n_bytes;
++ }
++ yaffs_dump_packed_tags2_tags_only(ptt);
++ yaffs_dump_tags2(t);
++}
++
++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
++ int tags_ecc)
++{
++ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++
++ if (pt->t.seq_number != 0xffffffff && tags_ecc) {
++ /* Chunk is in use and we need to do ECC */
++
++ struct yaffs_ecc_other ecc;
++ int result;
++ yaffs_ecc_calc_other((unsigned char *)&pt->t,
++ sizeof(struct yaffs_packed_tags2_tags_only),
++ &ecc);
++ result =
++ yaffs_ecc_correct_other((unsigned char *)&pt->t,
++ sizeof(struct yaffs_packed_tags2_tags_only),
++ &pt->ecc, &ecc);
++ switch (result) {
++ case 0:
++ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++ break;
++ case 1:
++ ecc_result = YAFFS_ECC_RESULT_FIXED;
++ break;
++ case -1:
++ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ break;
++ default:
++ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
++ }
++ }
++ yaffs_unpack_tags2_tags_only(t, &pt->t);
++
++ t->ecc_result = ecc_result;
++
++ yaffs_dump_packed_tags2(pt);
++ yaffs_dump_tags2(t);
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags2.h linux-3.14.4/fs/yaffs2/yaffs_packedtags2.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_packedtags2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_packedtags2.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,47 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
++
++#ifndef __YAFFS_PACKEDTAGS2_H__
++#define __YAFFS_PACKEDTAGS2_H__
++
++#include "yaffs_guts.h"
++#include "yaffs_ecc.h"
++
++struct yaffs_packed_tags2_tags_only {
++ unsigned seq_number;
++ unsigned obj_id;
++ unsigned chunk_id;
++ unsigned n_bytes;
++};
++
++struct yaffs_packed_tags2 {
++ struct yaffs_packed_tags2_tags_only t;
++ struct yaffs_ecc_other ecc;
++};
++
++/* Full packed tags with ECC, used for oob tags */
++void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
++ const struct yaffs_ext_tags *t, int tags_ecc);
++void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
++ int tags_ecc);
++
++/* Only the tags part (no ECC) for use with inband tags */
++void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
++ const struct yaffs_ext_tags *t);
++void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
++ struct yaffs_packed_tags2_tags_only *pt);
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_summary.c linux-3.14.4/fs/yaffs2/yaffs_summary.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_summary.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_summary.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,312 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/* Summaries write the useful part of the tags for the chunks in a block into
++ * an array which is written to the last n chunks of the block.
++ * Reading the summaries gives all the tags for the block in one read. Much
++ * faster.
++ *
++ * Chunks holding summaries are marked with tags making it look like
++ * they are part of a fake file.
++ *
++ * The summary could also be used during gc.
++ *
++ */
++
++#include "yaffs_summary.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_bitmap.h"
++
++/*
++ * The summary is built up in an array of summary tags.
++ * This gets written to the last one or two (maybe more) chunks in a block.
++ * A summary header is written as the first part of each chunk of summary data.
++ * The summary header must match or the summary is rejected.
++ */
++
++/* Summary tags don't need the sequence number because that is redundant. */
++struct yaffs_summary_tags {
++ unsigned obj_id;
++ unsigned chunk_id;
++ unsigned n_bytes;
++};
++
++/* Summary header */
++struct yaffs_summary_header {
++ unsigned version; /* Must match current version */
++ unsigned block; /* Must be this block */
++ unsigned seq; /* Must be this sequence number */
++ unsigned sum; /* Just add up all the bytes in the tags */
++};
++
++
++static void yaffs_summary_clear(struct yaffs_dev *dev)
++{
++ if (!dev->sum_tags)
++ return;
++ memset(dev->sum_tags, 0, dev->chunks_per_summary *
++ sizeof(struct yaffs_summary_tags));
++}
++
++
++void yaffs_summary_deinit(struct yaffs_dev *dev)
++{
++ kfree(dev->sum_tags);
++ dev->sum_tags = NULL;
++ kfree(dev->gc_sum_tags);
++ dev->gc_sum_tags = NULL;
++ dev->chunks_per_summary = 0;
++}
++
++int yaffs_summary_init(struct yaffs_dev *dev)
++{
++ int sum_bytes;
++ int chunks_used; /* Number of chunks used by summary */
++ int sum_tags_bytes;
++
++ sum_bytes = dev->param.chunks_per_block *
++ sizeof(struct yaffs_summary_tags);
++
++ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
++ (dev->data_bytes_per_chunk -
++ sizeof(struct yaffs_summary_header));
++
++ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
++ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
++ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
++ if (!dev->sum_tags || !dev->gc_sum_tags) {
++ yaffs_summary_deinit(dev);
++ return YAFFS_FAIL;
++ }
++
++ yaffs_summary_clear(dev);
++
++ return YAFFS_OK;
++}
++
++static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
++{
++ u8 *sum_buffer = (u8 *)dev->sum_tags;
++ int i;
++ unsigned sum = 0;
++
++ i = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ while (i > 0) {
++ sum += *sum_buffer;
++ sum_buffer++;
++ i--;
++ }
++
++ return sum;
++}
++
++static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
++{
++ struct yaffs_ext_tags tags;
++ u8 *buffer;
++ u8 *sum_buffer = (u8 *)dev->sum_tags;
++ int n_bytes;
++ int chunk_in_nand;
++ int chunk_in_block;
++ int result;
++ int this_tx;
++ struct yaffs_summary_header hdr;
++ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++
++ buffer = yaffs_get_temp_buffer(dev);
++ n_bytes = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ memset(&tags, 0, sizeof(struct yaffs_ext_tags));
++ tags.obj_id = YAFFS_OBJECTID_SUMMARY;
++ tags.chunk_id = 1;
++ chunk_in_block = dev->chunks_per_summary;
++ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
++ dev->chunks_per_summary;
++ hdr.version = YAFFS_SUMMARY_VERSION;
++ hdr.block = blk;
++ hdr.seq = bi->seq_number;
++ hdr.sum = yaffs_summary_sum(dev);
++
++ do {
++ this_tx = n_bytes;
++ if (this_tx > sum_bytes_per_chunk)
++ this_tx = sum_bytes_per_chunk;
++ memcpy(buffer, &hdr, sizeof(hdr));
++ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
++ tags.n_bytes = this_tx + sizeof(hdr);
++ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
++ buffer, &tags);
++
++ if (result != YAFFS_OK)
++ break;
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++ dev->n_free_chunks--;
++
++ n_bytes -= this_tx;
++ sum_buffer += this_tx;
++ chunk_in_nand++;
++ chunk_in_block++;
++ tags.chunk_id++;
++ } while (result == YAFFS_OK && n_bytes > 0);
++ yaffs_release_temp_buffer(dev, buffer);
++
++
++ if (result == YAFFS_OK)
++ bi->has_summary = 1;
++
++
++ return result;
++}
++
++int yaffs_summary_read(struct yaffs_dev *dev,
++ struct yaffs_summary_tags *st,
++ int blk)
++{
++ struct yaffs_ext_tags tags;
++ u8 *buffer;
++ u8 *sum_buffer = (u8 *)st;
++ int n_bytes;
++ int chunk_id;
++ int chunk_in_nand;
++ int chunk_in_block;
++ int result;
++ int this_tx;
++ struct yaffs_summary_header hdr;
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
++ int sum_tags_bytes;
++
++ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
++ dev->chunks_per_summary;
++ buffer = yaffs_get_temp_buffer(dev);
++ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
++ chunk_in_block = dev->chunks_per_summary;
++ chunk_in_nand = blk * dev->param.chunks_per_block +
++ dev->chunks_per_summary;
++ chunk_id = 1;
++ do {
++ this_tx = n_bytes;
++ if (this_tx > sum_bytes_per_chunk)
++ this_tx = sum_bytes_per_chunk;
++ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
++ buffer, &tags);
++
++ if (tags.chunk_id != chunk_id ||
++ tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
++ tags.chunk_used == 0 ||
++ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
++ tags.n_bytes != (this_tx + sizeof(hdr)))
++ result = YAFFS_FAIL;
++ if (result != YAFFS_OK)
++ break;
++
++ if (st == dev->sum_tags) {
++ /* If we're scanning then update the block info */
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++ }
++ memcpy(&hdr, buffer, sizeof(hdr));
++ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
++ n_bytes -= this_tx;
++ sum_buffer += this_tx;
++ chunk_in_nand++;
++ chunk_in_block++;
++ chunk_id++;
++ } while (result == YAFFS_OK && n_bytes > 0);
++ yaffs_release_temp_buffer(dev, buffer);
++
++ if (result == YAFFS_OK) {
++ /* Verify header */
++ if (hdr.version != YAFFS_SUMMARY_VERSION ||
++ hdr.seq != bi->seq_number ||
++ hdr.sum != yaffs_summary_sum(dev))
++ result = YAFFS_FAIL;
++ }
++
++ if (st == dev->sum_tags && result == YAFFS_OK)
++ bi->has_summary = 1;
++
++ return result;
++}
++
++int yaffs_summary_add(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_nand)
++{
++ struct yaffs_packed_tags2_tags_only tags_only;
++ struct yaffs_summary_tags *sum_tags;
++ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
++ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
++
++ if (!dev->sum_tags)
++ return YAFFS_OK;
++
++ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
++ yaffs_pack_tags2_tags_only(&tags_only, tags);
++ sum_tags = &dev->sum_tags[chunk_in_block];
++ sum_tags->chunk_id = tags_only.chunk_id;
++ sum_tags->n_bytes = tags_only.n_bytes;
++ sum_tags->obj_id = tags_only.obj_id;
++
++ if (chunk_in_block == dev->chunks_per_summary - 1) {
++ /* Time to write out the summary */
++ yaffs_summary_write(dev, block_in_nand);
++ yaffs_summary_clear(dev);
++ yaffs_skip_rest_of_block(dev);
++ }
++ }
++ return YAFFS_OK;
++}
++
++int yaffs_summary_fetch(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_block)
++{
++ struct yaffs_packed_tags2_tags_only tags_only;
++ struct yaffs_summary_tags *sum_tags;
++ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
++ sum_tags = &dev->sum_tags[chunk_in_block];
++ tags_only.chunk_id = sum_tags->chunk_id;
++ tags_only.n_bytes = sum_tags->n_bytes;
++ tags_only.obj_id = sum_tags->obj_id;
++ yaffs_unpack_tags2_tags_only(tags, &tags_only);
++ return YAFFS_OK;
++ }
++ return YAFFS_FAIL;
++}
++
++void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
++{
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
++ int i;
++
++ if (!bi->has_summary)
++ return;
++
++ for (i = dev->chunks_per_summary;
++ i < dev->param.chunks_per_block;
++ i++) {
++ if (yaffs_check_chunk_bit(dev, blk, i)) {
++ yaffs_clear_chunk_bit(dev, blk, i);
++ bi->pages_in_use--;
++ dev->n_free_chunks++;
++ }
++ }
++}
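As an illustrative aside (not part of the patch): the sizing arithmetic in yaffs_summary_init() above determines how many trailing chunks of a block the summary occupies. A standalone sketch of the same calculation, with the flash geometry assumed purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned chunks_per_block = 64;			/* assumed example geometry */
	unsigned data_bytes_per_chunk = 2048;		/* assumed example geometry */
	unsigned tag_entry = 3 * sizeof(unsigned);	/* obj_id, chunk_id, n_bytes */
	unsigned header = 4 * sizeof(unsigned);		/* version, block, seq, sum */

	unsigned sum_bytes = chunks_per_block * tag_entry;
	unsigned chunks_used = (sum_bytes + data_bytes_per_chunk - 1) /
			       (data_bytes_per_chunk - header);
	unsigned chunks_per_summary = chunks_per_block - chunks_used;

	/* With these values the summary fits in 1 chunk, leaving 63 data chunks. */
	printf("summary uses %u chunk(s); %u data chunks per block\n",
	       chunks_used, chunks_per_summary);
	return 0;
}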
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_summary.h linux-3.14.4/fs/yaffs2/yaffs_summary.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_summary.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_summary.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,37 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_SUMMARY_H__
++#define __YAFFS_SUMMARY_H__
++
++#include "yaffs_packedtags2.h"
++
++
++int yaffs_summary_init(struct yaffs_dev *dev);
++void yaffs_summary_deinit(struct yaffs_dev *dev);
++
++int yaffs_summary_add(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_block);
++int yaffs_summary_fetch(struct yaffs_dev *dev,
++ struct yaffs_ext_tags *tags,
++ int chunk_in_block);
++int yaffs_summary_read(struct yaffs_dev *dev,
++ struct yaffs_summary_tags *st,
++ int blk);
++void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
++
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_tagscompat.c linux-3.14.4/fs/yaffs2/yaffs_tagscompat.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_tagscompat.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_tagscompat.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,381 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_tagscompat.h"
++#include "yaffs_ecc.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_trace.h"
++
++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
++
++
++/********** Tags ECC calculations *********/
++
++
++void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
++{
++ /* Calculate an ecc */
++ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
++ unsigned i, j;
++ unsigned ecc = 0;
++ unsigned bit = 0;
++
++ tags->ecc = 0;
++
++ for (i = 0; i < 8; i++) {
++ for (j = 1; j & 0xff; j <<= 1) {
++ bit++;
++ if (b[i] & j)
++ ecc ^= bit;
++ }
++ }
++ tags->ecc = ecc;
++}
++
++int yaffs_check_tags_ecc(struct yaffs_tags *tags)
++{
++ unsigned ecc = tags->ecc;
++
++ yaffs_calc_tags_ecc(tags);
++
++ ecc ^= tags->ecc;
++
++ if (ecc && ecc <= 64) {
++ /* TODO: Handle the failure better. Retire? */
++ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
++
++ ecc--;
++
++ b[ecc / 8] ^= (1 << (ecc & 7));
++
++ /* Now recalc the ecc */
++ yaffs_calc_tags_ecc(tags);
++
++ return 1; /* recovered error */
++ } else if (ecc) {
++ /* Weird ecc failure value */
++ /* TODO: Need to do something here */
++ return -1; /* unrecovered error */
++ }
++ return 0;
++}
++
++/********** Tags **********/
++
++static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
++ struct yaffs_tags *tags_ptr)
++{
++ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
++
++ yaffs_calc_tags_ecc(tags_ptr);
++
++ spare_ptr->tb0 = tu->as_bytes[0];
++ spare_ptr->tb1 = tu->as_bytes[1];
++ spare_ptr->tb2 = tu->as_bytes[2];
++ spare_ptr->tb3 = tu->as_bytes[3];
++ spare_ptr->tb4 = tu->as_bytes[4];
++ spare_ptr->tb5 = tu->as_bytes[5];
++ spare_ptr->tb6 = tu->as_bytes[6];
++ spare_ptr->tb7 = tu->as_bytes[7];
++}
++
++static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
++ struct yaffs_spare *spare_ptr,
++ struct yaffs_tags *tags_ptr)
++{
++ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
++ int result;
++
++ tu->as_bytes[0] = spare_ptr->tb0;
++ tu->as_bytes[1] = spare_ptr->tb1;
++ tu->as_bytes[2] = spare_ptr->tb2;
++ tu->as_bytes[3] = spare_ptr->tb3;
++ tu->as_bytes[4] = spare_ptr->tb4;
++ tu->as_bytes[5] = spare_ptr->tb5;
++ tu->as_bytes[6] = spare_ptr->tb6;
++ tu->as_bytes[7] = spare_ptr->tb7;
++
++ result = yaffs_check_tags_ecc(tags_ptr);
++ if (result > 0)
++ dev->n_tags_ecc_fixed++;
++ else if (result < 0)
++ dev->n_tags_ecc_unfixed++;
++}
++
++static void yaffs_spare_init(struct yaffs_spare *spare)
++{
++ memset(spare, 0xff, sizeof(struct yaffs_spare));
++}
++
++static int yaffs_wr_nand(struct yaffs_dev *dev,
++ int nand_chunk, const u8 *data,
++ struct yaffs_spare *spare)
++{
++ int data_size = dev->data_bytes_per_chunk;
++
++ return dev->drv.drv_write_chunk_fn(dev, nand_chunk,
++ data, data_size,
++ (u8 *) spare, sizeof(*spare));
++}
++
++static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
++ int nand_chunk,
++ u8 *data,
++ struct yaffs_spare *spare,
++ enum yaffs_ecc_result *ecc_result,
++ int correct_errors)
++{
++ int ret_val;
++ struct yaffs_spare local_spare;
++ int data_size;
++ int spare_size;
++ int ecc_result1, ecc_result2;
++ u8 calc_ecc[3];
++
++ if (!spare) {
++ /* If we don't have a real spare, then we use a local one. */
++ /* Need this for the calculation of the ecc */
++ spare = &local_spare;
++ }
++ data_size = dev->data_bytes_per_chunk;
++ spare_size = sizeof(struct yaffs_spare);
++
++ if (dev->param.use_nand_ecc)
++ return dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, data_size,
++ (u8 *) spare, spare_size,
++ ecc_result);
++
++
++ /* Handle the ECC at this level. */
++
++ ret_val = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, data_size,
++ (u8 *)spare, spare_size,
++ NULL);
++ if (!data || !correct_errors)
++ return ret_val;
++
++ /* Do ECC correction if needed. */
++ yaffs_ecc_calc(data, calc_ecc);
++ ecc_result1 = yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
++ yaffs_ecc_calc(&data[256], calc_ecc);
++ ecc_result2 = yaffs_ecc_correct(&data[256], spare->ecc2, calc_ecc);
++
++ if (ecc_result1 > 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error fix performed on chunk %d:0",
++ nand_chunk);
++ dev->n_ecc_fixed++;
++ } else if (ecc_result1 < 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error unfixed on chunk %d:0",
++ nand_chunk);
++ dev->n_ecc_unfixed++;
++ }
++
++ if (ecc_result2 > 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error fix performed on chunk %d:1",
++ nand_chunk);
++ dev->n_ecc_fixed++;
++ } else if (ecc_result2 < 0) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "**>>yaffs ecc error unfixed on chunk %d:1",
++ nand_chunk);
++ dev->n_ecc_unfixed++;
++ }
++
++ if (ecc_result1 || ecc_result2) {
++ /* We had a data problem on this page */
++ yaffs_handle_rd_data_error(dev, nand_chunk);
++ }
++
++ if (ecc_result1 < 0 || ecc_result2 < 0)
++ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ else if (ecc_result1 > 0 || ecc_result2 > 0)
++ *ecc_result = YAFFS_ECC_RESULT_FIXED;
++ else
++ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
++
++ return ret_val;
++}
++
++/*
++ * Functions for robustness / error handling
++ */
++
++static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
++{
++ int flash_block = nand_chunk / dev->param.chunks_per_block;
++
++ /* Mark the block for retirement */
++ yaffs_get_block_info(dev, flash_block + dev->block_offset)->
++ needs_retiring = 1;
++ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
++ "**>>Block %d marked for retirement",
++ flash_block);
++
++ /* TODO:
++ * Just do a garbage collection on the affected block
++ * then retire the block
++ * NB recursion
++ */
++}
++
++static int yaffs_tags_compat_wr(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *data, const struct yaffs_ext_tags *ext_tags)
++{
++ struct yaffs_spare spare;
++ struct yaffs_tags tags;
++
++ yaffs_spare_init(&spare);
++
++ if (ext_tags->is_deleted)
++ spare.page_status = 0;
++ else {
++ tags.obj_id = ext_tags->obj_id;
++ tags.chunk_id = ext_tags->chunk_id;
++
++ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
++
++ if (dev->data_bytes_per_chunk >= 1024)
++ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
++ else
++ tags.n_bytes_msb = 3;
++
++ tags.serial_number = ext_tags->serial_number;
++
++ if (!dev->param.use_nand_ecc && data) {
++ yaffs_ecc_calc(data, spare.ecc1);
++ yaffs_ecc_calc(&data[256], spare.ecc2);
++ }
++
++ yaffs_load_tags_to_spare(&spare, &tags);
++ }
++ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
++}
++
++static int yaffs_tags_compat_rd(struct yaffs_dev *dev,
++ int nand_chunk,
++ u8 *data, struct yaffs_ext_tags *ext_tags)
++{
++ struct yaffs_spare spare;
++ struct yaffs_tags tags;
++ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
++ static struct yaffs_spare spare_ff;
++ static int init;
++ int deleted;
++
++ if (!init) {
++ memset(&spare_ff, 0xff, sizeof(spare_ff));
++ init = 1;
++ }
++
++ if (!yaffs_rd_chunk_nand(dev, nand_chunk,
++ data, &spare, &ecc_result, 1))
++ return YAFFS_FAIL;
++
++ /* ext_tags may be NULL */
++ if (!ext_tags)
++ return YAFFS_OK;
++
++ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
++
++ ext_tags->is_deleted = deleted;
++ ext_tags->ecc_result = ecc_result;
++ ext_tags->block_bad = 0; /* We're reading it */
++ /* therefore it is not a bad block */
++ ext_tags->chunk_used =
++ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
++
++ if (ext_tags->chunk_used) {
++ yaffs_get_tags_from_spare(dev, &spare, &tags);
++ ext_tags->obj_id = tags.obj_id;
++ ext_tags->chunk_id = tags.chunk_id;
++ ext_tags->n_bytes = tags.n_bytes_lsb;
++
++ if (dev->data_bytes_per_chunk >= 1024)
++ ext_tags->n_bytes |=
++ (((unsigned)tags.n_bytes_msb) << 10);
++
++ ext_tags->serial_number = tags.serial_number;
++ }
++
++ return YAFFS_OK;
++}
++
++static int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
++{
++ struct yaffs_spare spare;
++
++ memset(&spare, 0xff, sizeof(struct yaffs_spare));
++
++ spare.block_status = 'Y';
++
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
++ &spare);
++ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
++ NULL, &spare);
++
++ return YAFFS_OK;
++}
++
++static int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number)
++{
++ struct yaffs_spare spare0, spare1;
++ static struct yaffs_spare spare_ff;
++ static int init;
++ enum yaffs_ecc_result dummy;
++
++ if (!init) {
++ memset(&spare_ff, 0xff, sizeof(spare_ff));
++ init = 1;
++ }
++
++ *seq_number = 0;
++
++ /* Look for bad block markers in the first two chunks */
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block,
++ NULL, &spare0, &dummy, 0);
++ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
++ NULL, &spare1, &dummy, 0);
++
++ if (hweight8(spare0.block_status & spare1.block_status) < 7)
++ *state = YAFFS_BLOCK_STATE_DEAD;
++ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
++ *state = YAFFS_BLOCK_STATE_EMPTY;
++ else
++ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
++
++ return YAFFS_OK;
++}
++
++void yaffs_tags_compat_install(struct yaffs_dev *dev)
++{
++ if(dev->param.is_yaffs2)
++ return;
++ if(!dev->tagger.write_chunk_tags_fn)
++ dev->tagger.write_chunk_tags_fn = yaffs_tags_compat_wr;
++ if(!dev->tagger.read_chunk_tags_fn)
++ dev->tagger.read_chunk_tags_fn = yaffs_tags_compat_rd;
++ if(!dev->tagger.query_block_fn)
++ dev->tagger.query_block_fn = yaffs_tags_compat_query_block;
++ if(!dev->tagger.mark_bad_fn)
++ dev->tagger.mark_bad_fn = yaffs_tags_compat_mark_bad;
++}
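As an illustrative aside (not part of the patch): yaffs_check_tags_ecc() above can repair a single flipped bit because yaffs_calc_tags_ecc() XORs together the 1-based positions of all set bits, so the syndrome of a corrupted buffer directly names the bit to flip back. A self-contained sketch of the same idea over a plain 8-byte buffer; the helper name and test values are made up:

#include <stdio.h>

static unsigned calc_ecc(const unsigned char b[8])
{
	unsigned ecc = 0, bit = 0;
	int i, j;

	for (i = 0; i < 8; i++)
		for (j = 1; j & 0xff; j <<= 1) {
			bit++;
			if (b[i] & j)
				ecc ^= bit;	/* XOR in this bit's position */
		}
	return ecc;
}

int main(void)
{
	unsigned char b[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	unsigned stored = calc_ecc(b);
	unsigned syndrome;

	b[3] ^= 0x04;				/* simulate a single-bit flip */

	syndrome = stored ^ calc_ecc(b);	/* names the flipped bit, 1..64 */
	if (syndrome && syndrome <= 64) {
		syndrome--;
		b[syndrome / 8] ^= (1 << (syndrome & 7));	/* repair it */
	}
	printf("repaired: %s\n", calc_ecc(b) == stored ? "yes" : "no");
	return 0;
}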
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_tagscompat.h linux-3.14.4/fs/yaffs2/yaffs_tagscompat.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_tagscompat.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_tagscompat.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,44 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSCOMPAT_H__
++#define __YAFFS_TAGSCOMPAT_H__
++
++
++#include "yaffs_guts.h"
++
++#if 0
++
++
++int yaffs_tags_compat_wr(struct yaffs_dev *dev,
++ int nand_chunk,
++ const u8 *data, const struct yaffs_ext_tags *tags);
++int yaffs_tags_compat_rd(struct yaffs_dev *dev,
++ int nand_chunk,
++ u8 *data, struct yaffs_ext_tags *tags);
++int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
++int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
++ int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number);
++
++#endif
++
++
++void yaffs_tags_compat_install(struct yaffs_dev *dev);
++void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
++int yaffs_check_tags_ecc(struct yaffs_tags *tags);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_tagsmarshall.c linux-3.14.4/fs/yaffs2/yaffs_tagsmarshall.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_tagsmarshall.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_tagsmarshall.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,199 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_packedtags2.h"
++
++static int yaffs_tags_marshall_write(struct yaffs_dev *dev,
++ int nand_chunk, const u8 *data,
++ const struct yaffs_ext_tags *tags)
++{
++ struct yaffs_packed_tags2 pt;
++ int retval;
++
++ int packed_tags_size =
++ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void *packed_tags_ptr =
++ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "yaffs_tags_marshall_write chunk %d data %p tags %p",
++ nand_chunk, data, tags);
++
++ /* For yaffs2 writing there must be both data and tags.
++ * If we're using inband tags, then the tags are stuffed into
++ * the end of the data buffer.
++ */
++ if (!data || !tags)
++ BUG();
++ else if (dev->param.inband_tags) {
++ struct yaffs_packed_tags2_tags_only *pt2tp;
++ pt2tp =
++ (struct yaffs_packed_tags2_tags_only *)(data +
++ dev->
++ data_bytes_per_chunk);
++ yaffs_pack_tags2_tags_only(pt2tp, tags);
++ } else {
++ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
++ }
++
++ retval = dev->drv.drv_write_chunk_fn(dev, nand_chunk,
++ data, dev->param.total_bytes_per_chunk,
++ (dev->param.inband_tags) ? NULL : packed_tags_ptr,
++ (dev->param.inband_tags) ? 0 : packed_tags_size);
++
++ return retval;
++}
++
++static int yaffs_tags_marshall_read(struct yaffs_dev *dev,
++ int nand_chunk, u8 *data,
++ struct yaffs_ext_tags *tags)
++{
++ int retval = 0;
++ int local_data = 0;
++ u8 spare_buffer[100];
++ enum yaffs_ecc_result ecc_result;
++
++ struct yaffs_packed_tags2 pt;
++
++ int packed_tags_size =
++ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
++ void *packed_tags_ptr =
++ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "yaffs_tags_marshall_read chunk %d data %p tags %p",
++ nand_chunk, data, tags);
++
++ if (dev->param.inband_tags) {
++ if (!data) {
++ local_data = 1;
++ data = yaffs_get_temp_buffer(dev);
++ }
++ }
++
++ if (dev->param.inband_tags || (data && !tags))
++ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, dev->param.total_bytes_per_chunk,
++ NULL, 0,
++ &ecc_result);
++ else if (tags)
++ retval = dev->drv.drv_read_chunk_fn(dev, nand_chunk,
++ data, dev->param.total_bytes_per_chunk,
++ spare_buffer, packed_tags_size,
++ &ecc_result);
++ else
++ BUG();
++
++
++ if (dev->param.inband_tags) {
++ if (tags) {
++ struct yaffs_packed_tags2_tags_only *pt2tp;
++ pt2tp =
++ (struct yaffs_packed_tags2_tags_only *)
++ &data[dev->data_bytes_per_chunk];
++ yaffs_unpack_tags2_tags_only(tags, pt2tp);
++ }
++ } else if (tags) {
++ memcpy(packed_tags_ptr, spare_buffer, packed_tags_size);
++ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
++ }
++
++ if (local_data)
++ yaffs_release_temp_buffer(dev, data);
++
++ if (tags && ecc_result == YAFFS_ECC_RESULT_UNFIXED) {
++ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
++ dev->n_ecc_unfixed++;
++ }
++
++ if (tags && ecc_result == YAFFS_ECC_RESULT_FIXED) {
++ if (tags->ecc_result <= YAFFS_ECC_RESULT_NO_ERROR)
++ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
++ dev->n_ecc_fixed++;
++ }
++
++ if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
++ return YAFFS_OK;
++ else
++ return YAFFS_FAIL;
++}
++
++static int yaffs_tags_marshall_query_block(struct yaffs_dev *dev, int block_no,
++ enum yaffs_block_state *state,
++ u32 *seq_number)
++{
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_MTD, "yaffs_tags_marshall_query_block %d",
++ block_no);
++
++ retval = dev->drv.drv_check_bad_fn(dev, block_no);
++
++ if (retval == YAFFS_FAIL) {
++ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
++
++ *state = YAFFS_BLOCK_STATE_DEAD;
++ *seq_number = 0;
++ } else {
++ struct yaffs_ext_tags t;
++
++ yaffs_tags_marshall_read(dev,
++ block_no * dev->param.chunks_per_block,
++ NULL, &t);
++
++ if (t.chunk_used) {
++ *seq_number = t.seq_number;
++ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
++ } else {
++ *seq_number = 0;
++ *state = YAFFS_BLOCK_STATE_EMPTY;
++ }
++ }
++
++ yaffs_trace(YAFFS_TRACE_MTD,
++ "block query returns seq %d state %d",
++ *seq_number, *state);
++
++ if (retval == 0)
++ return YAFFS_OK;
++ else
++ return YAFFS_FAIL;
++}
++
++static int yaffs_tags_marshall_mark_bad(struct yaffs_dev *dev, int block_no)
++{
++ return dev->drv.drv_mark_bad_fn(dev, block_no);
++
++}
++
++
++void yaffs_tags_marshall_install(struct yaffs_dev *dev)
++{
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (!dev->tagger.write_chunk_tags_fn)
++ dev->tagger.write_chunk_tags_fn = yaffs_tags_marshall_write;
++
++ if (!dev->tagger.read_chunk_tags_fn)
++ dev->tagger.read_chunk_tags_fn = yaffs_tags_marshall_read;
++
++ if (!dev->tagger.query_block_fn)
++ dev->tagger.query_block_fn = yaffs_tags_marshall_query_block;
++
++ if (!dev->tagger.mark_bad_fn)
++ dev->tagger.mark_bad_fn = yaffs_tags_marshall_mark_bad;
++
++}
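As an illustrative aside (not part of the patch): yaffs_tags_marshall_write() above chooses between three spare-area payloads. A condensed sketch of just that choice, with stand-in struct layouts (the real ones live in yaffs_packedtags2.h):

struct pt_only { unsigned seq_number, obj_id, chunk_id, n_bytes; };	/* stand-in */
struct pt_full { struct pt_only t; unsigned ecc[3]; };			/* stand-in */

/* Returns the buffer handed to the driver as OOB data, and its length. */
static void *select_oob(struct pt_full *pt, int inband_tags,
			int no_tags_ecc, unsigned *oob_len)
{
	if (inband_tags) {
		*oob_len = 0;			/* tags travel inside the data buffer */
		return 0;
	}
	if (no_tags_ecc) {
		*oob_len = sizeof(pt->t);	/* packed tags only, no tags ECC */
		return &pt->t;
	}
	*oob_len = sizeof(*pt);			/* packed tags plus their own ECC */
	return pt;
}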
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_tagsmarshall.h linux-3.14.4/fs/yaffs2/yaffs_tagsmarshall.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_tagsmarshall.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_tagsmarshall.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_TAGSMARSHALL_H__
++#define __YAFFS_TAGSMARSHALL_H__
++
++#include "yaffs_guts.h"
++void yaffs_tags_marshall_install(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_trace.h linux-3.14.4/fs/yaffs2/yaffs_trace.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_trace.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_trace.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,57 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YTRACE_H__
++#define __YTRACE_H__
++
++extern unsigned int yaffs_trace_mask;
++extern unsigned int yaffs_wr_attempts;
++
++/*
++ * Tracing flags.
++ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
++ */
++
++#define YAFFS_TRACE_OS 0x00000002
++#define YAFFS_TRACE_ALLOCATE 0x00000004
++#define YAFFS_TRACE_SCAN 0x00000008
++#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
++#define YAFFS_TRACE_ERASE 0x00000020
++#define YAFFS_TRACE_GC 0x00000040
++#define YAFFS_TRACE_WRITE 0x00000080
++#define YAFFS_TRACE_TRACING 0x00000100
++#define YAFFS_TRACE_DELETION 0x00000200
++#define YAFFS_TRACE_BUFFERS 0x00000400
++#define YAFFS_TRACE_NANDACCESS 0x00000800
++#define YAFFS_TRACE_GC_DETAIL 0x00001000
++#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
++#define YAFFS_TRACE_MTD 0x00004000
++#define YAFFS_TRACE_CHECKPOINT 0x00008000
++
++#define YAFFS_TRACE_VERIFY 0x00010000
++#define YAFFS_TRACE_VERIFY_NAND 0x00020000
++#define YAFFS_TRACE_VERIFY_FULL 0x00040000
++#define YAFFS_TRACE_VERIFY_ALL 0x000f0000
++
++#define YAFFS_TRACE_SYNC 0x00100000
++#define YAFFS_TRACE_BACKGROUND 0x00200000
++#define YAFFS_TRACE_LOCK 0x00400000
++#define YAFFS_TRACE_MOUNT 0x00800000
++
++#define YAFFS_TRACE_ERROR 0x40000000
++#define YAFFS_TRACE_BUG 0x80000000
++#define YAFFS_TRACE_ALWAYS 0xf0000000
++
++#endif
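As an illustrative aside (not part of the patch): the verification code in yaffs_verify.c below is compiled in but skipped unless the matching trace bits are set in yaffs_trace_mask, which yaffs_vfs.c also exposes as a writable module parameter. A minimal sketch of enabling it from kernel code:

#include "yaffs_trace.h"

static void enable_yaffs_verification(void)
{
	/* Turn on the checks guarded by yaffs_skip_verification() and
	 * yaffs_skip_nand_verification(). */
	yaffs_trace_mask |= YAFFS_TRACE_VERIFY |
			    YAFFS_TRACE_VERIFY_NAND |
			    YAFFS_TRACE_BAD_BLOCKS;
}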
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_verify.c linux-3.14.4/fs/yaffs2/yaffs_verify.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_verify.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_verify.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,529 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_verify.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++
++int yaffs_skip_verification(struct yaffs_dev *dev)
++{
++ (void) dev;
++ return !(yaffs_trace_mask &
++ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_full_verification(struct yaffs_dev *dev)
++{
++ (void) dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
++}
++
++static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
++{
++ (void) dev;
++ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
++}
++
++static const char * const block_state_name[] = {
++ "Unknown",
++ "Needs scan",
++ "Scanning",
++ "Empty",
++ "Allocating",
++ "Full",
++ "Dirty",
++ "Checkpoint",
++ "Collecting",
++ "Dead"
++};
++
++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
++{
++ int actually_used;
++ int in_use;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Report illegal runtime states */
++ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has undefined state %d",
++ n, bi->block_state);
++
++ switch (bi->block_state) {
++ case YAFFS_BLOCK_STATE_UNKNOWN:
++ case YAFFS_BLOCK_STATE_SCANNING:
++ case YAFFS_BLOCK_STATE_NEEDS_SCAN:
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has bad run-state %s",
++ n, block_state_name[bi->block_state]);
++ }
++
++ /* Check pages in use and soft deletions are legal */
++
++ actually_used = bi->pages_in_use - bi->soft_del_pages;
++
++ if (bi->pages_in_use < 0 ||
++ bi->pages_in_use > dev->param.chunks_per_block ||
++ bi->soft_del_pages < 0 ||
++ bi->soft_del_pages > dev->param.chunks_per_block ||
++ actually_used < 0 || actually_used > dev->param.chunks_per_block)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has illegal values pages_in_used %d soft_del_pages %d",
++ n, bi->pages_in_use, bi->soft_del_pages);
++
++ /* Check chunk bitmap legal */
++ in_use = yaffs_count_chunk_bits(dev, n);
++ if (in_use != bi->pages_in_use)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
++ n, bi->pages_in_use, in_use);
++}
++
++void yaffs_verify_collected_blk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi, int n)
++{
++ yaffs_verify_blk(dev, bi, n);
++
++ /* After collection the block should be in the erased state */
++
++ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
++ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Block %d is in state %d after gc, should be erased",
++ n, bi->block_state);
++ }
++}
++
++void yaffs_verify_blocks(struct yaffs_dev *dev)
++{
++ int i;
++ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
++ int illegal_states = 0;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ memset(state_count, 0, sizeof(state_count));
++
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
++ yaffs_verify_blk(dev, bi, i);
++
++ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
++ state_count[bi->block_state]++;
++ else
++ illegal_states++;
++ }
++
++ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
++
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "%d blocks have illegal states",
++ illegal_states);
++ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Too many allocating blocks");
++
++ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "%s %d blocks",
++ block_state_name[i], state_count[i]);
++
++ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Checkpoint block count wrong dev %d count %d",
++ dev->blocks_in_checkpt,
++ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
++
++ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Erased block count wrong dev %d count %d",
++ dev->n_erased_blocks,
++ state_count[YAFFS_BLOCK_STATE_EMPTY]);
++
++ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Too many collecting blocks %d (max is 1)",
++ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
++}
++
++/*
++ * Verify the object header. oh must be valid, but obj and tags may be NULL in
++ * which case those tests will not be performed.
++ */
++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
++ struct yaffs_ext_tags *tags, int parent_check)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!(tags && obj && oh)) {
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Verifying object header tags %p obj %p oh %p",
++ tags, obj, oh);
++ return;
++ }
++
++ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
++ oh->type > YAFFS_OBJECT_TYPE_MAX)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header type is illegal value 0x%x",
++ tags->obj_id, oh->type);
++
++ if (tags->obj_id != obj->obj_id)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header mismatch obj_id %d",
++ tags->obj_id, obj->obj_id);
++
++ /*
++ * Check that the object's parent ids match if parent_check requested.
++ *
++ * Tests do not apply to the root object.
++ */
++
++ if (parent_check && tags->obj_id > 1 && !obj->parent)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header mismatch parent_id %d obj->parent is NULL",
++ tags->obj_id, oh->parent_obj_id);
++
++ if (parent_check && obj->parent &&
++ oh->parent_obj_id != obj->parent->obj_id &&
++ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header mismatch parent_id %d parent_obj_id %d",
++ tags->obj_id, oh->parent_obj_id,
++ obj->parent->obj_id);
++
++ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header name is NULL",
++ obj->obj_id);
++
++ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d header name is 0xff",
++ obj->obj_id);
++}
++
++void yaffs_verify_file(struct yaffs_obj *obj)
++{
++ u32 x;
++ int required_depth;
++ int actual_depth;
++ int last_chunk;
++ u32 offset_in_chunk;
++ u32 the_chunk;
++
++ u32 i;
++ struct yaffs_dev *dev;
++ struct yaffs_ext_tags tags;
++ struct yaffs_tnode *tn;
++ u32 obj_id;
++
++ if (!obj)
++ return;
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ dev = obj->my_dev;
++ obj_id = obj->obj_id;
++
++
++ /* Check file size is consistent with tnode depth */
++ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
++ &last_chunk, &offset_in_chunk);
++ last_chunk++;
++ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
++ required_depth = 0;
++ while (x > 0) {
++ x >>= YAFFS_TNODES_INTERNAL_BITS;
++ required_depth++;
++ }
++
++ actual_depth = obj->variant.file_variant.top_level;
++
++ /* Check that the chunks in the tnode tree are all correct.
++ * We do this by scanning through the tnode tree and
++ * checking the tags for every chunk match.
++ */
++
++ if (yaffs_skip_nand_verification(dev))
++ return;
++
++ for (i = 1; i <= last_chunk; i++) {
++ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
++
++ if (!tn)
++ continue;
++
++ the_chunk = yaffs_get_group_base(dev, tn, i);
++ if (the_chunk > 0) {
++ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
++ &tags);
++ if (tags.obj_id != obj_id || tags.chunk_id != i)
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
++ obj_id, i, the_chunk,
++ tags.obj_id, tags.chunk_id);
++ }
++ }
++}
++
++void yaffs_verify_link(struct yaffs_obj *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify sane equivalent object */
++}
++
++void yaffs_verify_symlink(struct yaffs_obj *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++
++ /* Verify symlink string */
++}
++
++void yaffs_verify_special(struct yaffs_obj *obj)
++{
++ if (obj && yaffs_skip_verification(obj->my_dev))
++ return;
++}
++
++void yaffs_verify_obj(struct yaffs_obj *obj)
++{
++ struct yaffs_dev *dev;
++ u32 chunk_min;
++ u32 chunk_max;
++ u32 chunk_id_ok;
++ u32 chunk_in_range;
++ u32 chunk_wrongly_deleted;
++ u32 chunk_valid;
++
++ if (!obj)
++ return;
++
++ if (obj->being_created)
++ return;
++
++ dev = obj->my_dev;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Check sane object header chunk */
++
++ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
++ chunk_max =
++ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
++
++ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
++ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
++ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
++ chunk_valid = chunk_in_range &&
++ yaffs_check_chunk_bit(dev,
++ obj->hdr_chunk / dev->param.chunks_per_block,
++ obj->hdr_chunk % dev->param.chunks_per_block);
++ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
++
++ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d has chunk_id %d %s %s",
++ obj->obj_id, obj->hdr_chunk,
++ chunk_id_ok ? "" : ",out of range",
++ chunk_wrongly_deleted ? ",marked as deleted" : "");
++
++ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
++ struct yaffs_ext_tags tags;
++ struct yaffs_obj_hdr *oh;
++ u8 *buffer = yaffs_get_temp_buffer(dev);
++
++ oh = (struct yaffs_obj_hdr *)buffer;
++
++ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
++
++ yaffs_verify_oh(obj, oh, &tags, 1);
++
++ yaffs_release_temp_buffer(dev, buffer);
++ }
++
++ /* Verify it has a parent */
++ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d has parent pointer %p which does not look like an object",
++ obj->obj_id, obj->parent);
++ }
++
++ /* Verify parent is a directory */
++ if (obj->parent &&
++ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d's parent is not a directory (type %d)",
++ obj->obj_id, obj->parent->variant_type);
++ }
++
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ yaffs_verify_file(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ yaffs_verify_symlink(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ yaffs_verify_dir(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ yaffs_verify_link(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ yaffs_verify_special(obj);
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ default:
++ yaffs_trace(YAFFS_TRACE_VERIFY,
++ "Obj %d has illegaltype %d",
++ obj->obj_id, obj->variant_type);
++ break;
++ }
++}
++
++void yaffs_verify_objects(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ int i;
++ struct list_head *lh;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ /* Iterate through the objects in each hash entry */
++
++ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
++ list_for_each(lh, &dev->obj_bucket[i].list) {
++ obj = list_entry(lh, struct yaffs_obj, hash_link);
++ yaffs_verify_obj(obj);
++ }
++ }
++}
++
++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
++{
++ struct list_head *lh;
++ struct yaffs_obj *list_obj;
++ int count = 0;
++
++ if (!obj) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
++ BUG();
++ return;
++ }
++
++ if (yaffs_skip_verification(obj->my_dev))
++ return;
++
++ if (!obj->parent) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
++ BUG();
++ return;
++ }
++
++ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
++ BUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
++ list_obj = list_entry(lh, struct yaffs_obj, siblings);
++ yaffs_verify_obj(list_obj);
++ if (obj == list_obj)
++ count++;
++ }
++
++ if (count != 1) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Object in directory %d times",
++ count);
++ BUG();
++ }
++}
++
++void yaffs_verify_dir(struct yaffs_obj *directory)
++{
++ struct list_head *lh;
++ struct yaffs_obj *list_obj;
++
++ if (!directory) {
++ BUG();
++ return;
++ }
++
++ if (yaffs_skip_full_verification(directory->my_dev))
++ return;
++
++ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Directory has wrong type: %d",
++ directory->variant_type);
++ BUG();
++ }
++
++ /* Iterate through the objects in each hash entry */
++
++ list_for_each(lh, &directory->variant.dir_variant.children) {
++ list_obj = list_entry(lh, struct yaffs_obj, siblings);
++ if (list_obj->parent != directory) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Object in directory list has wrong parent %p",
++ list_obj->parent);
++ BUG();
++ }
++ yaffs_verify_obj_in_dir(list_obj);
++ }
++}
++
++static int yaffs_free_verification_failures;
++
++void yaffs_verify_free_chunks(struct yaffs_dev *dev)
++{
++ int counted;
++ int difference;
++
++ if (yaffs_skip_verification(dev))
++ return;
++
++ counted = yaffs_count_free_chunks(dev);
++
++ difference = dev->n_free_chunks - counted;
++
++ if (difference) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Freechunks verification failure %d %d %d",
++ dev->n_free_chunks, counted, difference);
++ yaffs_free_verification_failures++;
++ }
++}
++
++int yaffs_verify_file_sane(struct yaffs_obj *in)
++{
++ (void) in;
++ return YAFFS_OK;
++}
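As an illustrative aside (not part of the patch): yaffs_verify_file() above derives the tnode tree depth a file of a given size requires. The same loop in isolation, with the level-0 and internal fan-out bit widths assumed purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned level0_bits = 4;	/* assumed: 16 chunk slots per level-0 tnode */
	unsigned internal_bits = 3;	/* assumed: 8 children per internal tnode */
	unsigned last_chunk = 100000;	/* chunk index just past the end of the file */
	unsigned x = last_chunk >> level0_bits;
	int required_depth = 0;

	while (x > 0) {
		x >>= internal_bits;
		required_depth++;
	}

	/* For these values the tree must be at least 5 levels deep. */
	printf("required tnode depth: %d\n", required_depth);
	return 0;
}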
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_verify.h linux-3.14.4/fs/yaffs2/yaffs_verify.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_verify.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_verify.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,43 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_VERIFY_H__
++#define __YAFFS_VERIFY_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
++ int n);
++void yaffs_verify_collected_blk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi, int n);
++void yaffs_verify_blocks(struct yaffs_dev *dev);
++
++void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
++ struct yaffs_ext_tags *tags, int parent_check);
++void yaffs_verify_file(struct yaffs_obj *obj);
++void yaffs_verify_link(struct yaffs_obj *obj);
++void yaffs_verify_symlink(struct yaffs_obj *obj);
++void yaffs_verify_special(struct yaffs_obj *obj);
++void yaffs_verify_obj(struct yaffs_obj *obj);
++void yaffs_verify_objects(struct yaffs_dev *dev);
++void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
++void yaffs_verify_dir(struct yaffs_obj *directory);
++void yaffs_verify_free_chunks(struct yaffs_dev *dev);
++
++int yaffs_verify_file_sane(struct yaffs_obj *obj);
++
++int yaffs_skip_verification(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_vfs.c linux-3.14.4/fs/yaffs2/yaffs_vfs.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_vfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_vfs.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,3600 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ * Acknowledgements:
++ * Luc van OostenRyck for numerous patches.
++ * Nick Bane for numerous patches.
++ * Nick Bane for 2.5/2.6 integration.
++ * Andras Toth for mknod rdev issue.
++ * Michael Fischer for finding the problem with inode inconsistency.
++ * Some code bodily lifted from JFFS
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++/*
++ *
++ * This is the file system front-end to YAFFS that hooks it up to
++ * the VFS.
++ *
++ * Special notes:
++ * >> 2.4: sb->u.generic_sbp points to the struct yaffs_dev associated with
++ * this superblock
++ * >> 2.6: sb->s_fs_info points to the struct yaffs_dev associated with this
++ * superblock
++ * >> inode->u.generic_ip points to the associated struct yaffs_obj.
++ */
++
++/*
++ * There are two variants of the VFS glue code. This variant should compile
++ * for any version of Linux.
++ */
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10))
++#define YAFFS_COMPILE_BACKGROUND
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23))
++#define YAFFS_COMPILE_FREEZER
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
++#define YAFFS_COMPILE_EXPORTFS
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++#define YAFFS_USE_SETATTR_COPY
++#define YAFFS_USE_TRUNCATE_SETSIZE
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++#define YAFFS_HAS_EVICT_INODE
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++#define YAFFS_NEW_FOLLOW_LINK 1
++#else
++#define YAFFS_NEW_FOLLOW_LINK 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define YAFFS_HAS_WRITE_SUPER
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/proc_fs.h>
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39))
++#include <linux/smp_lock.h>
++#endif
++#include <linux/pagemap.h>
++#include <linux/mtd/mtd.h>
++#include <linux/interrupt.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++#include <linux/namei.h>
++#endif
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++#include <linux/exportfs.h>
++#endif
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#endif
++#ifdef YAFFS_COMPILE_FREEZER
++#include <linux/freezer.h>
++#endif
++
++#include <asm/div64.h>
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++#include <linux/statfs.h>
++
++#define UnlockPage(p) unlock_page(p)
++#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
++
++/* FIXME: use sb->s_id instead ? */
++#define yaffs_devname(sb, buf) bdevname(sb->s_bdev, buf)
++
++#else
++
++#include <linux/locks.h>
++#define BDEVNAME_SIZE 0
++#define yaffs_devname(sb, buf) kdevname(sb->s_dev)
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0))
++/* added NCB 26/5/2006 for 2.4.25-vrs2-tcl1 kernel */
++#define __user
++#endif
++
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define YPROC_ROOT (&proc_root)
++#else
++#define YPROC_ROOT NULL
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
++#define Y_INIT_TIMER(a) init_timer(a)
++#else
++#define Y_INIT_TIMER(a) init_timer_on_stack(a)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27))
++#define YAFFS_USE_WRITE_BEGIN_END 1
++#else
++#define YAFFS_USE_WRITE_BEGIN_END 0
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define YAFFS_SUPER_HAS_DIRTY
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
++#define set_nlink(inode, count) do { (inode)->i_nlink = (count); } while(0)
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 28))
++static uint32_t YCALCBLOCKS(uint64_t partition_size, uint32_t block_size)
++{
++ uint64_t result = partition_size;
++ do_div(result, block_size);
++ return (uint32_t) result;
++}
++#else
++#define YCALCBLOCKS(s, b) ((s)/(b))
++#endif
++
++#include <linux/uaccess.h>
++#include <linux/mtd/mtd.h>
++
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_guts.h"
++#include "yaffs_attribs.h"
++
++#include "yaffs_linux.h"
++
++#include "yaffs_mtdif.h"
++#include "yaffs_packedtags2.h"
++#include "yaffs_getblockinfo.h"
++
++unsigned int yaffs_trace_mask =
++ YAFFS_TRACE_BAD_BLOCKS |
++ YAFFS_TRACE_ALWAYS |
++ 0;
++
++unsigned int yaffs_wr_attempts = YAFFS_WR_ATTEMPTS;
++unsigned int yaffs_auto_checkpoint = 1;
++unsigned int yaffs_gc_control = 1;
++unsigned int yaffs_bg_enable = 1;
++unsigned int yaffs_auto_select = 1;
++/* Module Parameters */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++module_param(yaffs_trace_mask, uint, 0644);
++module_param(yaffs_wr_attempts, uint, 0644);
++module_param(yaffs_auto_checkpoint, uint, 0644);
++module_param(yaffs_gc_control, uint, 0644);
++module_param(yaffs_bg_enable, uint, 0644);
++#else
++MODULE_PARM(yaffs_trace_mask, "i");
++MODULE_PARM(yaffs_wr_attempts, "i");
++MODULE_PARM(yaffs_auto_checkpoint, "i");
++MODULE_PARM(yaffs_gc_control, "i");
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++/* use iget and read_inode */
++#define Y_IGET(sb, inum) iget((sb), (inum))
++
++#else
++/* Call local equivalent */
++#define YAFFS_USE_OWN_IGET
++#define Y_IGET(sb, inum) yaffs_iget((sb), (inum))
++
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->i_private)
++#else
++#define yaffs_inode_to_obj_lv(iptr) ((iptr)->u.generic_ip)
++#endif
++
++#define yaffs_inode_to_obj(iptr) \
++ ((struct yaffs_obj *)(yaffs_inode_to_obj_lv(iptr)))
++#define yaffs_dentry_to_obj(dptr) yaffs_inode_to_obj((dptr)->d_inode)
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->s_fs_info)
++#else
++#define yaffs_super_to_dev(sb) ((struct yaffs_dev *)sb->u.generic_sbp)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#define Y_CLEAR_INODE(i) clear_inode(i)
++#else
++#define Y_CLEAR_INODE(i) end_writeback(i)
++#endif
++
++
++#define update_dir_time(dir) do {\
++ (dir)->i_ctime = (dir)->i_mtime = CURRENT_TIME; \
++ } while (0)
++
++static void yaffs_fill_inode_from_obj(struct inode *inode,
++ struct yaffs_obj *obj);
++
++
++static void yaffs_gross_lock(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locking %p", current);
++ mutex_lock(&(yaffs_dev_to_lc(dev)->gross_lock));
++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs locked %p", current);
++}
++
++static void yaffs_gross_unlock(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_LOCK, "yaffs unlocking %p", current);
++ mutex_unlock(&(yaffs_dev_to_lc(dev)->gross_lock));
++}
++
++
++static int yaffs_readpage_nolock(struct file *f, struct page *pg)
++{
++ /* Lifted from jffs2 */
++
++ struct yaffs_obj *obj;
++ unsigned char *pg_buf;
++ int ret;
++ loff_t pos = ((loff_t) pg->index) << PAGE_CACHE_SHIFT;
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readpage_nolock at %lld, size %08x",
++ (long long)pos,
++ (unsigned)PAGE_CACHE_SIZE);
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ BUG_ON(!PageLocked(pg));
++#else
++ if (!PageLocked(pg))
++ PAGE_BUG(pg);
++#endif
++
++ pg_buf = kmap(pg);
++ /* FIXME: Can kmap fail? */
++
++ yaffs_gross_lock(dev);
++
++ ret = yaffs_file_rd(obj, pg_buf, pos, PAGE_CACHE_SIZE);
++
++ yaffs_gross_unlock(dev);
++
++ if (ret >= 0)
++ ret = 0;
++
++ if (ret) {
++ ClearPageUptodate(pg);
++ SetPageError(pg);
++ } else {
++ SetPageUptodate(pg);
++ ClearPageError(pg);
++ }
++
++ flush_dcache_page(pg);
++ kunmap(pg);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage_nolock done");
++ return ret;
++}
++
++static int yaffs_readpage_unlock(struct file *f, struct page *pg)
++{
++ int ret = yaffs_readpage_nolock(f, pg);
++ UnlockPage(pg);
++ return ret;
++}
++
++static int yaffs_readpage(struct file *f, struct page *pg)
++{
++ int ret;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage");
++ ret = yaffs_readpage_unlock(f, pg);
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_readpage done");
++ return ret;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
++#define YCRED_FSUID() from_kuid(&init_user_ns, current_fsuid())
++#define YCRED_FSGID() from_kgid(&init_user_ns, current_fsgid())
++#else
++#define YCRED_FSUID() YCRED(current)->fsuid
++#define YCRED_FSGID() YCRED(current)->fsgid
++
++static inline uid_t i_uid_read(const struct inode *inode)
++{
++ return inode->i_uid;
++}
++
++static inline gid_t i_gid_read(const struct inode *inode)
++{
++ return inode->i_gid;
++}
++
++static inline void i_uid_write(struct inode *inode, uid_t uid)
++{
++ inode->i_uid = uid;
++}
++
++static inline void i_gid_write(struct inode *inode, gid_t gid)
++{
++ inode->i_gid = gid;
++}
++#endif
++
++static void yaffs_set_super_dirty_val(struct yaffs_dev *dev, int val)
++{
++ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
++
++ if (lc)
++ lc->dirty = val;
++
++# ifdef YAFFS_SUPER_HAS_DIRTY
++ {
++ struct super_block *sb = lc->super;
++
++ if (sb)
++ sb->s_dirt = val;
++ }
++#endif
++
++}
++
++static void yaffs_set_super_dirty(struct yaffs_dev *dev)
++{
++ yaffs_set_super_dirty_val(dev, 1);
++}
++
++static void yaffs_clear_super_dirty(struct yaffs_dev *dev)
++{
++ yaffs_set_super_dirty_val(dev, 0);
++}
++
++static int yaffs_check_super_dirty(struct yaffs_dev *dev)
++{
++ struct yaffs_linux_context *lc = yaffs_dev_to_lc(dev);
++
++ if (lc && lc->dirty)
++ return 1;
++
++# ifdef YAFFS_SUPER_HAS_DIRTY
++ {
++ struct super_block *sb = lc->super;
++
++ if (sb && sb->s_dirt)
++ return 1;
++ }
++#endif
++ return 0;
++
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_writepage(struct page *page, struct writeback_control *wbc)
++#else
++static int yaffs_writepage(struct page *page)
++#endif
++{
++ struct yaffs_dev *dev;
++ struct address_space *mapping = page->mapping;
++ struct inode *inode;
++ unsigned long end_index;
++ char *buffer;
++ struct yaffs_obj *obj;
++ int n_written = 0;
++ unsigned n_bytes;
++ loff_t i_size;
++
++ if (!mapping)
++ BUG();
++ inode = mapping->host;
++ if (!inode)
++ BUG();
++ i_size = i_size_read(inode);
++
++ end_index = i_size >> PAGE_CACHE_SHIFT;
++
++ if (page->index < end_index)
++ n_bytes = PAGE_CACHE_SIZE;
++ else {
++ n_bytes = i_size & (PAGE_CACHE_SIZE - 1);
++
++ if (page->index > end_index || !n_bytes) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_writepage at %lld, inode size = %lld!!",
++ ((loff_t)page->index) << PAGE_CACHE_SHIFT,
++ inode->i_size);
++ yaffs_trace(YAFFS_TRACE_OS,
++ " -> don't care!!");
++
++ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ return 0;
++ }
++ }
++
++ if (n_bytes != PAGE_CACHE_SIZE)
++ zero_user_segment(page, n_bytes, PAGE_CACHE_SIZE);
++
++ get_page(page);
++
++ buffer = kmap(page);
++
++ obj = yaffs_inode_to_obj(inode);
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_writepage at %lld, size %08x",
++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "writepag0: obj = %lld, ino = %lld",
++ obj->variant.file_variant.file_size, inode->i_size);
++
++ n_written = yaffs_wr_file(obj, buffer,
++ ((loff_t)page->index) << PAGE_CACHE_SHIFT, n_bytes, 0);
++
++ yaffs_set_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "writepag1: obj = %lld, ino = %lld",
++ obj->variant.file_variant.file_size, inode->i_size);
++
++ yaffs_gross_unlock(dev);
++
++ kunmap(page);
++ set_page_writeback(page);
++ unlock_page(page);
++ end_page_writeback(page);
++ put_page(page);
++
++ return (n_written == n_bytes) ? 0 : -ENOSPC;
++}
++
++/* Space holding and freeing is done to ensure we have space available for write_begin/end */
++/* For now we just assume few parallel writes and check against a small number. */
++/* Todo: need to do this with a counter to handle parallel reads better */
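++/* yaffs_hold_space() below returns 1 when more than 20 free chunks remain
++ * (the "small number" referred to above), otherwise 0. */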
++
++static ssize_t yaffs_hold_space(struct file *f)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++
++ int n_free_chunks;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ n_free_chunks = yaffs_get_n_free_chunks(dev);
++
++ yaffs_gross_unlock(dev);
++
++ return (n_free_chunks > 20) ? 1 : 0;
++}
++
++static void yaffs_release_space(struct file *f)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_gross_unlock(dev);
++}
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_begin(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned flags,
++ struct page **pagep, void **fsdata)
++{
++ struct page *pg = NULL;
++ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
++
++ int ret = 0;
++ int space_held = 0;
++
++ /* Get a page */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ pg = grab_cache_page_write_begin(mapping, index, flags);
++#else
++ pg = __grab_cache_page(mapping, index);
++#endif
++
++ *pagep = pg;
++ if (!pg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ yaffs_trace(YAFFS_TRACE_OS,
++ "start yaffs_write_begin index %d(%x) uptodate %d",
++ (int)index, (int)index, Page_Uptodate(pg) ? 1 : 0);
++
++ /* Get fs space */
++ space_held = yaffs_hold_space(filp);
++
++ if (!space_held) {
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ /* Update page if required */
++
++ if (!Page_Uptodate(pg))
++ ret = yaffs_readpage_nolock(filp, pg);
++
++ if (ret)
++ goto out;
++
++ /* Happy path return */
++ yaffs_trace(YAFFS_TRACE_OS, "end yaffs_write_begin - ok");
++
++ return 0;
++
++out:
++ yaffs_trace(YAFFS_TRACE_OS,
++ "end yaffs_write_begin fail returning %d", ret);
++ if (space_held)
++ yaffs_release_space(filp);
++ if (pg) {
++ unlock_page(pg);
++ page_cache_release(pg);
++ }
++ return ret;
++}
++
++#else
++
++static int yaffs_prepare_write(struct file *f, struct page *pg,
++ unsigned offset, unsigned to)
++{
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_prepare_write");
++
++ if (!Page_Uptodate(pg))
++ return yaffs_readpage_nolock(f, pg);
++ return 0;
++}
++#endif
++
++
++static ssize_t yaffs_file_write(struct file *f, const char *buf, size_t n,
++ loff_t * pos)
++{
++ struct yaffs_obj *obj;
++ int n_written;
++ loff_t ipos;
++ struct inode *inode;
++ struct yaffs_dev *dev;
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++
++ if (!obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write: hey obj is null!");
++ return -EINVAL;
++ }
++
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ inode = f->f_dentry->d_inode;
++
++ if (!S_ISBLK(inode->i_mode) && f->f_flags & O_APPEND)
++ ipos = inode->i_size;
++ else
++ ipos = *pos;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write about to write writing %u(%x) bytes to object %d at %lld",
++ (unsigned)n, (unsigned)n, obj->obj_id, ipos);
++
++ n_written = yaffs_wr_file(obj, buf, ipos, n, 0);
++
++ yaffs_set_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write: %d(%x) bytes written",
++ (unsigned)n, (unsigned)n);
++
++ if (n_written > 0) {
++ ipos += n_written;
++ *pos = ipos;
++ if (ipos > inode->i_size) {
++ inode->i_size = ipos;
++ inode->i_blocks = (ipos + 511) >> 9;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_write size updated to %lld bytes, %d blocks",
++ ipos, (int)(inode->i_blocks));
++ }
++
++ }
++ yaffs_gross_unlock(dev);
++ return (n_written == 0) && (n > 0) ? -ENOSPC : n_written;
++}
++
++
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++static int yaffs_write_end(struct file *filp, struct address_space *mapping,
++ loff_t pos, unsigned len, unsigned copied,
++ struct page *pg, void *fsdadata)
++{
++ int ret = 0;
++ void *addr, *kva;
++ uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);
++
++ kva = kmap(pg);
++ addr = kva + offset_into_page;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_write_end addr %p pos %lld n_bytes %d",
++ addr, pos, copied);
++
++ ret = yaffs_file_write(filp, addr, copied, &pos);
++
++ if (ret != copied) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_write_end not same size ret %d copied %d",
++ ret, copied);
++ SetPageError(pg);
++ }
++
++ kunmap(pg);
++
++ yaffs_release_space(filp);
++ unlock_page(pg);
++ page_cache_release(pg);
++ return ret;
++}
++#else
++
++static int yaffs_commit_write(struct file *f, struct page *pg, unsigned offset,
++ unsigned to)
++{
++ void *addr, *kva;
++
++ loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
++ int n_bytes = to - offset;
++ int n_written;
++
++ kva = kmap(pg);
++ addr = kva + offset;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_commit_write addr %p pos %lld n_bytes %d",
++ addr, pos, n_bytes);
++
++ n_written = yaffs_file_write(f, addr, n_bytes, &pos);
++
++ if (n_written != n_bytes) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_commit_write not same size n_written %d n_bytes %d",
++ n_written, n_bytes);
++ SetPageError(pg);
++ }
++ kunmap(pg);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_commit_write returning %d",
++ n_written == n_bytes ? 0 : n_written);
++
++ return n_written == n_bytes ? 0 : n_written;
++}
++#endif
++
++static struct address_space_operations yaffs_file_address_operations = {
++ .readpage = yaffs_readpage,
++ .writepage = yaffs_writepage,
++#if (YAFFS_USE_WRITE_BEGIN_END > 0)
++ .write_begin = yaffs_write_begin,
++ .write_end = yaffs_write_end,
++#else
++ .prepare_write = yaffs_prepare_write,
++ .commit_write = yaffs_commit_write,
++#endif
++};
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_file_flush(struct file *file, fl_owner_t id)
++#else
++static int yaffs_file_flush(struct file *file)
++#endif
++{
++ struct yaffs_obj *obj = yaffs_dentry_to_obj(file->f_dentry);
++
++ struct yaffs_dev *dev = obj->my_dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_file_flush object %d (%s)",
++ obj->obj_id,
++ obj->dirty ? "dirty" : "clean");
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_file(obj, 1, 0);
++
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static int yaffs_sync_object(struct file *file, loff_t start, loff_t end, int datasync)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++static int yaffs_sync_object(struct file *file, int datasync)
++#else
++static int yaffs_sync_object(struct file *file, struct dentry *dentry,
++ int datasync)
++#endif
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 34))
++ struct dentry *dentry = file->f_path.dentry;
++#endif
++
++ obj = yaffs_dentry_to_obj(dentry);
++
++ dev = obj->my_dev;
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ "yaffs_sync_object");
++ yaffs_gross_lock(dev);
++ yaffs_flush_file(obj, 1, datasync);
++ yaffs_gross_unlock(dev);
++ return 0;
++}
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22))
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .splice_read = generic_file_splice_read,
++ .splice_write = generic_file_splice_write,
++ .llseek = generic_file_llseek,
++};
++
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
++
++static const struct file_operations yaffs_file_operations = {
++ .read = do_sync_read,
++ .write = do_sync_write,
++ .aio_read = generic_file_aio_read,
++ .aio_write = generic_file_aio_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++ .sendfile = generic_file_sendfile,
++};
++
++#else
++
++static const struct file_operations yaffs_file_operations = {
++ .read = generic_file_read,
++ .write = generic_file_write,
++ .mmap = generic_file_mmap,
++ .flush = yaffs_file_flush,
++ .fsync = yaffs_sync_object,
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ .sendfile = generic_file_sendfile,
++#endif
++};
++#endif
++
++
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25))
++static void zero_user_segment(struct page *page, unsigned start, unsigned end)
++{
++ void *kaddr = kmap_atomic(page, KM_USER0);
++ memset(kaddr + start, 0, end - start);
++ kunmap_atomic(kaddr, KM_USER0);
++ flush_dcache_page(page);
++}
++#endif
++
++
++static int yaffs_vfs_setsize(struct inode *inode, loff_t newsize)
++{
++#ifdef YAFFS_USE_TRUNCATE_SETSIZE
++ truncate_setsize(inode, newsize);
++ return 0;
++#else
++ truncate_inode_pages(&inode->i_data, newsize);
++ return 0;
++#endif
++
++}
++
++
++static int yaffs_vfs_setattr(struct inode *inode, struct iattr *attr)
++{
++#ifdef YAFFS_USE_SETATTR_COPY
++ setattr_copy(inode, attr);
++ return 0;
++#else
++ return inode_setattr(inode, attr);
++#endif
++
++}
++
++static int yaffs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_setattr of object %d",
++ yaffs_inode_to_obj(inode)->obj_id);
++#if 0
++ /* Fail if a requested resize >= 2GB */
++ if (attr->ia_valid & ATTR_SIZE && (attr->ia_size >> 31))
++ error = -EINVAL;
++#endif
++
++ if (error == 0)
++ error = inode_change_ok(inode, attr);
++ if (error == 0) {
++ int result;
++ if (!error) {
++ error = yaffs_vfs_setattr(inode, attr);
++ yaffs_trace(YAFFS_TRACE_OS, "inode_setattr called");
++ if (attr->ia_valid & ATTR_SIZE) {
++ yaffs_vfs_setsize(inode, attr->ia_size);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++ }
++ }
++ dev = yaffs_inode_to_obj(inode)->my_dev;
++ if (attr->ia_valid & ATTR_SIZE) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "resize to %d(%x)",
++ (int)(attr->ia_size),
++ (int)(attr->ia_size));
++ }
++ yaffs_gross_lock(dev);
++ result = yaffs_set_attribs(yaffs_inode_to_obj(inode), attr);
++ if (result == YAFFS_OK) {
++ error = 0;
++ } else {
++ error = -EPERM;
++ }
++ yaffs_gross_unlock(dev);
++
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setattr done returning %d", error);
++
++ return error;
++}
++
++static int yaffs_setxattr(struct dentry *dentry, const char *name,
++ const void *value, size_t size, int flags)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr of object %d", obj->obj_id);
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_set_xattrib(obj, name, value, size, flags);
++ if (result == YAFFS_OK)
++ error = 0;
++ else if (result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_setxattr done returning %d", error);
++
++ return error;
++}
++
++static ssize_t yaffs_getxattr(struct dentry * dentry, const char *name,
++ void *buff, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_getxattr \"%s\" from object %d",
++ name, obj->obj_id);
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_get_xattrib(obj, name, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_getxattr done returning %d", error);
++
++ return error;
++}
++
++static int yaffs_removexattr(struct dentry *dentry, const char *name)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_removexattr of object %d", obj->obj_id);
++
++ if (error == 0) {
++ int result;
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ result = yaffs_remove_xattrib(obj, name);
++ if (result == YAFFS_OK)
++ error = 0;
++ else if (result < 0)
++ error = result;
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_removexattr done returning %d", error);
++
++ return error;
++}
++
++static ssize_t yaffs_listxattr(struct dentry * dentry, char *buff, size_t size)
++{
++ struct inode *inode = dentry->d_inode;
++ int error = 0;
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_listxattr of object %d", obj->obj_id);
++
++ if (error == 0) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ error = yaffs_list_xattrib(obj, buff, size);
++ yaffs_gross_unlock(dev);
++
++ }
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_listxattr done returning %d", error);
++
++ return error;
++}
++
++
++static const struct inode_operations yaffs_file_inode_operations = {
++ .setattr = yaffs_setattr,
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++};
++
++
++static int yaffs_readlink(struct dentry *dentry, char __user * buffer,
++ int buflen)
++{
++ unsigned char *alias;
++ int ret;
++
++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++
++ yaffs_gross_unlock(dev);
++
++ if (!alias)
++ return -ENOMEM;
++
++ ret = vfs_readlink(dentry, buffer, buflen, alias);
++ kfree(alias);
++ return ret;
++}
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++static void *yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ void *ret;
++#else
++static int yaffs_follow_link(struct dentry *dentry, struct nameidata *nd)
++{
++ int ret;
++#endif
++ unsigned char *alias;
++ int ret_int = 0;
++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ alias = yaffs_get_symlink_alias(yaffs_dentry_to_obj(dentry));
++ yaffs_gross_unlock(dev);
++
++ if (!alias) {
++ ret_int = -ENOMEM;
++ goto out;
++ }
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ nd_set_link(nd, alias);
++ ret = alias;
++out:
++ if (ret_int)
++ ret = ERR_PTR(ret_int);
++ return ret;
++#else
++ ret = vfs_follow_link(nd, alias);
++ kfree(alias);
++out:
++ if (ret_int)
++ ret = ret_int;
++ return ret;
++#endif
++}
++
++
++#ifdef YAFFS_HAS_PUT_INODE
++
++/* For now put inode is just for debugging
++ * Put inode is called when the inode **structure** is put.
++ */
++static void yaffs_put_inode(struct inode *inode)
++{
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_put_inode: ino %d, count %d"),
++ (int)inode->i_ino, atomic_read(&inode->i_count);
++
++}
++#endif
++
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++void yaffs_put_link(struct dentry *dentry, struct nameidata *nd, void *alias)
++{
++ kfree(alias);
++}
++#endif
++
++static const struct inode_operations yaffs_symlink_inode_operations = {
++ .readlink = yaffs_readlink,
++ .follow_link = yaffs_follow_link,
++#if (YAFFS_NEW_FOLLOW_LINK == 1)
++ .put_link = yaffs_put_link,
++#endif
++ .setattr = yaffs_setattr,
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++};
++
++#ifdef YAFFS_USE_OWN_IGET
++
++static struct inode *yaffs_iget(struct super_block *sb, unsigned long ino)
++{
++ struct inode *inode;
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_iget for %lu", ino);
++
++ inode = iget_locked(sb, ino);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++ if (!(inode->i_state & I_NEW))
++ return inode;
++
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ yaffs_gross_unlock(dev);
++
++ unlock_new_inode(inode);
++ return inode;
++}
++
++#else
++
++static void yaffs_read_inode(struct inode *inode)
++{
++ /* NB This is called as a side effect of other functions, but
++ * we had to release the lock to prevent deadlocks, so
++ * need to lock again.
++ */
++
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev = yaffs_super_to_dev(inode->i_sb);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_inode for %d", (int)inode->i_ino);
++
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_lock(dev);
++
++ obj = yaffs_find_by_number(dev, inode->i_ino);
++
++ yaffs_fill_inode_from_obj(inode, obj);
++
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_unlock(dev);
++}
++
++#endif
++
++
++
++struct inode *yaffs_get_inode(struct super_block *sb, int mode, int dev,
++ struct yaffs_obj *obj)
++{
++ struct inode *inode;
++
++ if (!sb) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_get_inode for NULL super_block!!");
++ return NULL;
++
++ }
++
++ if (!obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_get_inode for NULL object!!");
++ return NULL;
++
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_get_inode for object %d", obj->obj_id);
++
++ inode = Y_IGET(sb, obj->obj_id);
++ if (IS_ERR(inode))
++ return NULL;
++
++ /* NB Side effect: iget calls back to yaffs_read_inode(). */
++ /* iget also increments the inode's i_count */
++ /* NB You can't be holding gross_lock or deadlock will happen! */
++
++ return inode;
++}
++
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
++#define YCRED(x) x
++#else
++#define YCRED(x) (x->cred)
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++ dev_t rdev)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ dev_t rdev)
++#else
++static int yaffs_mknod(struct inode *dir, struct dentry *dentry, int mode,
++ int rdev)
++#endif
++{
++ struct inode *inode;
++
++ struct yaffs_obj *obj = NULL;
++ struct yaffs_dev *dev;
++
++ struct yaffs_obj *parent = yaffs_inode_to_obj(dir);
++
++ int error = -ENOSPC;
++ uid_t uid = YCRED_FSUID();
++ gid_t gid =
++ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
++
++ if ((dir->i_mode & S_ISGID) && S_ISDIR(mode))
++ mode |= S_ISGID;
++
++ if (parent) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod: parent object %d type %d",
++ parent->obj_id, parent->variant_type);
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod: could not get parent object");
++ return -EPERM;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod: making oject for %s, mode %x dev %x",
++ dentry->d_name.name, mode, rdev);
++
++ dev = parent->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ switch (mode & S_IFMT) {
++ default:
++ /* Special (socket, fifo, device...) */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making special");
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ obj =
++ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, old_encode_dev(rdev));
++#else
++ obj =
++ yaffs_create_special(parent, dentry->d_name.name, mode, uid,
++ gid, rdev);
++#endif
++ break;
++ case S_IFREG: /* file */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making file");
++ obj = yaffs_create_file(parent, dentry->d_name.name, mode, uid,
++ gid);
++ break;
++ case S_IFDIR: /* directory */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making directory");
++ obj = yaffs_create_dir(parent, dentry->d_name.name, mode,
++ uid, gid);
++ break;
++ case S_IFLNK: /* symlink */
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod: making symlink");
++ obj = NULL; /* Do we ever get here? */
++ break;
++ }
++
++ /* Can not call yaffs_get_inode() with gross lock held */
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ inode = yaffs_get_inode(dir->i_sb, mode, rdev, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_mknod created object %d count = %d",
++ obj->obj_id, atomic_read(&inode->i_count));
++ error = 0;
++ yaffs_fill_inode_from_obj(dir, parent);
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mknod failed making object");
++ error = -ENOMEM;
++ }
++
++ return error;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
++#else
++static int yaffs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++ int ret_val;
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_mkdir");
++ ret_val = yaffs_mknod(dir, dentry, mode | S_IFDIR, 0);
++ return ret_val;
++}
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ bool dummy)
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
++ struct nameidata *n)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode,
++ struct nameidata *n)
++#else
++static int yaffs_create(struct inode *dir, struct dentry *dentry, int mode)
++#endif
++{
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_create");
++ return yaffs_mknod(dir, dentry, mode | S_IFREG, 0);
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ unsigned int dummy)
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry,
++ struct nameidata *n)
++#else
++static struct dentry *yaffs_lookup(struct inode *dir, struct dentry *dentry)
++#endif
++{
++ struct yaffs_obj *obj;
++ struct inode *inode = NULL; /* NCB 2.5/2.6 needs NULL here */
++
++ struct yaffs_dev *dev = yaffs_inode_to_obj(dir)->my_dev;
++
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_lock(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup for %d:%s",
++ yaffs_inode_to_obj(dir)->obj_id, dentry->d_name.name);
++
++ obj = yaffs_find_by_name(yaffs_inode_to_obj(dir), dentry->d_name.name);
++
++ obj = yaffs_get_equivalent_obj(obj); /* in case it was a hardlink */
++
++ /* Can't hold gross lock when calling yaffs_get_inode() */
++ if (current != yaffs_dev_to_lc(dev)->readdir_process)
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_lookup found %d", obj->obj_id);
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_lookup not found");
++
++ }
++
++/* added NCB for 2.5/6 compatibility - forces add even if inode is
++ * NULL which creates dentry hash */
++ d_add(dentry, inode);
++
++ return NULL;
++}
++
++/*
++ * Create a link...
++ */
++static int yaffs_link(struct dentry *old_dentry, struct inode *dir,
++ struct dentry *dentry)
++{
++ struct inode *inode = old_dentry->d_inode;
++ struct yaffs_obj *obj = NULL;
++ struct yaffs_obj *link = NULL;
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_link");
++
++ obj = yaffs_inode_to_obj(inode);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ if (!S_ISDIR(inode->i_mode)) /* Don't link directories */
++ link =
++ yaffs_link_obj(yaffs_inode_to_obj(dir), dentry->d_name.name,
++ obj);
++
++ if (link) {
++ set_nlink(old_dentry->d_inode, yaffs_get_obj_link_count(obj));
++ d_instantiate(dentry, old_dentry->d_inode);
++ atomic_inc(&old_dentry->d_inode->i_count);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_link link count %d i_count %d",
++ old_dentry->d_inode->i_nlink,
++ atomic_read(&old_dentry->d_inode->i_count));
++ }
++
++ yaffs_gross_unlock(dev);
++
++ if (link) {
++ update_dir_time(dir);
++ return 0;
++ }
++
++ return -EPERM;
++}
++
++static int yaffs_symlink(struct inode *dir, struct dentry *dentry,
++ const char *symname)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ uid_t uid = YCRED_FSUID();
++ gid_t gid =
++ (dir->i_mode & S_ISGID) ? i_gid_read(dir) : YCRED_FSGID();
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_symlink");
++
++ if (strnlen(dentry->d_name.name, YAFFS_MAX_NAME_LENGTH + 1) >
++ YAFFS_MAX_NAME_LENGTH)
++ return -ENAMETOOLONG;
++
++ if (strnlen(symname, YAFFS_MAX_ALIAS_LENGTH + 1) >
++ YAFFS_MAX_ALIAS_LENGTH)
++ return -ENAMETOOLONG;
++
++ dev = yaffs_inode_to_obj(dir)->my_dev;
++ yaffs_gross_lock(dev);
++ obj = yaffs_create_symlink(yaffs_inode_to_obj(dir), dentry->d_name.name,
++ S_IFLNK | S_IRWXUGO, uid, gid, symname);
++ yaffs_gross_unlock(dev);
++
++ if (obj) {
++ struct inode *inode;
++
++ inode = yaffs_get_inode(dir->i_sb, obj->yst_mode, 0, obj);
++ d_instantiate(dentry, inode);
++ update_dir_time(dir);
++ yaffs_trace(YAFFS_TRACE_OS, "symlink created OK");
++ return 0;
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS, "symlink not created");
++ }
++
++ return -ENOMEM;
++}
++
++/*
++ * The VFS layer already does all the dentry stuff for rename.
++ *
++ * NB: POSIX says you can rename an object over an old object of the same name
++ */
++static int yaffs_rename(struct inode *old_dir, struct dentry *old_dentry,
++ struct inode *new_dir, struct dentry *new_dentry)
++{
++ struct yaffs_dev *dev;
++ int ret_val = YAFFS_FAIL;
++ struct yaffs_obj *target;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_rename");
++ dev = yaffs_inode_to_obj(old_dir)->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ /* Check if the target is an existing directory that is not empty. */
++ target = yaffs_find_by_name(yaffs_inode_to_obj(new_dir),
++ new_dentry->d_name.name);
++
++ if (target && target->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY &&
++ !list_empty(&target->variant.dir_variant.children)) {
++
++ yaffs_trace(YAFFS_TRACE_OS, "target is non-empty dir");
++
++ ret_val = YAFFS_FAIL;
++ } else {
++ /* Now does unlinking internally using shadowing mechanism */
++ yaffs_trace(YAFFS_TRACE_OS, "calling yaffs_rename_obj");
++
++ ret_val = yaffs_rename_obj(yaffs_inode_to_obj(old_dir),
++ old_dentry->d_name.name,
++ yaffs_inode_to_obj(new_dir),
++ new_dentry->d_name.name);
++ }
++ yaffs_gross_unlock(dev);
++
++ if (ret_val == YAFFS_OK) {
++ if (target)
++ inode_dec_link_count(new_dentry->d_inode);
++
++ update_dir_time(old_dir);
++ if (old_dir != new_dir)
++ update_dir_time(new_dir);
++ return 0;
++ } else {
++ return -ENOTEMPTY;
++ }
++}
++
++
++
++
++static int yaffs_unlink(struct inode *dir, struct dentry *dentry)
++{
++ int ret_val;
++
++ struct yaffs_dev *dev;
++ struct yaffs_obj *obj;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_unlink %d:%s",
++ (int)(dir->i_ino), dentry->d_name.name);
++ obj = yaffs_inode_to_obj(dir);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ ret_val = yaffs_unlinker(obj, dentry->d_name.name);
++
++ if (ret_val == YAFFS_OK) {
++ inode_dec_link_count(dentry->d_inode);
++ dir->i_version++;
++ yaffs_gross_unlock(dev);
++ update_dir_time(dir);
++ return 0;
++ }
++ yaffs_gross_unlock(dev);
++ return -ENOTEMPTY;
++}
++
++
++
++static const struct inode_operations yaffs_dir_inode_operations = {
++ .create = yaffs_create,
++ .lookup = yaffs_lookup,
++ .link = yaffs_link,
++ .unlink = yaffs_unlink,
++ .symlink = yaffs_symlink,
++ .mkdir = yaffs_mkdir,
++ .rmdir = yaffs_unlink,
++ .mknod = yaffs_mknod,
++ .rename = yaffs_rename,
++ .setattr = yaffs_setattr,
++ .setxattr = yaffs_setxattr,
++ .getxattr = yaffs_getxattr,
++ .listxattr = yaffs_listxattr,
++ .removexattr = yaffs_removexattr,
++};
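++
++/* Note: .rmdir above reuses yaffs_unlink(); that path returns -ENOTEMPTY
++ * whenever the underlying yaffs_unlinker() call fails (see yaffs_unlink()). */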
++
++/*-----------------------------------------------------------------*/
++/* Directory search context allows us to unlock access to yaffs during
++ * filldir without causing problems with the directory being modified.
++ * This is similar to the tried and tested mechanism used in yaffs direct.
++ *
++ * A search context iterates along a doubly linked list of siblings in the
++ * directory. If the iterating object is deleted then this would corrupt
++ * the list iteration, likely causing a crash. The search context avoids
++ * this by using the remove_obj_fn to move the search context to the
++ * next object before the object is deleted.
++ *
++ * Many readdirs (and thus search contexts) may be alive simultaneously so
++ * each struct yaffs_dev has a list of these.
++ *
++ * A search context lives for the duration of a readdir.
++ *
++ * All these functions must be called while yaffs is locked.
++ */
++
++struct yaffs_search_context {
++ struct yaffs_dev *dev;
++ struct yaffs_obj *dir_obj;
++ struct yaffs_obj *next_return;
++ struct list_head others;
++};
++
++/*
++ * yaffs_new_search() creates a new search context, initialises it and
++ * adds it to the device's search context list.
++ *
++ * Called at start of readdir.
++ */
++static struct yaffs_search_context *yaffs_new_search(struct yaffs_obj *dir)
++{
++ struct yaffs_dev *dev = dir->my_dev;
++ struct yaffs_search_context *sc =
++ kmalloc(sizeof(struct yaffs_search_context), GFP_NOFS);
++ if (sc) {
++ sc->dir_obj = dir;
++ sc->dev = dev;
++ if (list_empty(&sc->dir_obj->variant.dir_variant.children))
++ sc->next_return = NULL;
++ else
++ sc->next_return =
++ list_entry(dir->variant.dir_variant.children.next,
++ struct yaffs_obj, siblings);
++ INIT_LIST_HEAD(&sc->others);
++ list_add(&sc->others, &(yaffs_dev_to_lc(dev)->search_contexts));
++ }
++ return sc;
++}
++
++/*
++ * yaffs_search_end() disposes of a search context and cleans up.
++ */
++static void yaffs_search_end(struct yaffs_search_context *sc)
++{
++ if (sc) {
++ list_del(&sc->others);
++ kfree(sc);
++ }
++}
++
++/*
++ * yaffs_search_advance() moves a search context to the next object.
++ * Called when the search iterates or when an object removal causes
++ * the search context to be moved to the next object.
++ */
++static void yaffs_search_advance(struct yaffs_search_context *sc)
++{
++ if (!sc)
++ return;
++
++ if (sc->next_return == NULL ||
++ list_empty(&sc->dir_obj->variant.dir_variant.children))
++ sc->next_return = NULL;
++ else {
++ struct list_head *next = sc->next_return->siblings.next;
++
++ if (next == &sc->dir_obj->variant.dir_variant.children)
++ sc->next_return = NULL; /* end of list */
++ else
++ sc->next_return =
++ list_entry(next, struct yaffs_obj, siblings);
++ }
++}
++
++/*
++ * yaffs_remove_obj_callback() is called when an object is unlinked.
++ * We check open search contexts and advance any which are currently
++ * on the object being iterated.
++ */
++static void yaffs_remove_obj_callback(struct yaffs_obj *obj)
++{
++
++ struct list_head *i;
++ struct yaffs_search_context *sc;
++ struct list_head *search_contexts =
++ &(yaffs_dev_to_lc(obj->my_dev)->search_contexts);
++
++ /* Iterate through the directory search contexts.
++ * If any are currently on the object being removed, then advance
++ * the search context to the next object to prevent a dangling pointer.
++ */
++ list_for_each(i, search_contexts) {
++ sc = list_entry(i, struct yaffs_search_context, others);
++ if (sc->next_return == obj)
++ yaffs_search_advance(sc);
++ }
++
++}
++
++
++/*-----------------------------------------------------------------*/
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
++static int yaffs_readdir(struct file *file, struct dir_context *ctx)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ struct yaffs_search_context *sc;
++ struct inode *inode = file->f_dentry->d_inode;
++ unsigned long offset, curoffs;
++ struct yaffs_obj *l;
++ int ret_val = 0;
++
++ char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ obj = yaffs_dentry_to_obj(file->f_dentry);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_dev_to_lc(dev)->readdir_process = current;
++
++ offset = ctx->pos;
++
++ sc = yaffs_new_search(obj);
++ if (!sc) {
++ ret_val = -ENOMEM;
++ goto out;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: starting at %d", (int)offset);
++
++ if (offset == 0) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry . ino %d",
++ (int)inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (!dir_emit_dot(file, ctx)) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ ctx->pos++;
++ }
++ if (offset == 1) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry .. ino %d",
++ (int)file->f_dentry->d_parent->d_inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (!dir_emit_dotdot(file, ctx)) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ ctx->pos++;
++ }
++
++ curoffs = 1;
++
++ /* If the directory has changed since the open or last call to
++ readdir, rewind to after the 2 canned entries. */
++ if (file->f_version != inode->i_version) {
++ offset = 2;
++ ctx->pos = offset;
++ file->f_version = inode->i_version;
++ }
++
++ while (sc->next_return) {
++ curoffs++;
++ l = sc->next_return;
++ if (curoffs >= offset) {
++ int this_inode = yaffs_get_obj_inode(l);
++ int this_type = yaffs_get_obj_type(l);
++
++ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: %s inode %d",
++ name, yaffs_get_obj_inode(l));
++
++ yaffs_gross_unlock(dev);
++
++ if (!dir_emit(ctx, name, strlen(name),
++ this_inode, this_type)) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++
++ yaffs_gross_lock(dev);
++
++ offset++;
++ ctx->pos++;
++ }
++ yaffs_search_advance(sc);
++ }
++
++out:
++ yaffs_search_end(sc);
++ yaffs_dev_to_lc(dev)->readdir_process = NULL;
++ yaffs_gross_unlock(dev);
++
++ return ret_val;
++}
++#else
++static int yaffs_readdir(struct file *f, void *dirent, filldir_t filldir)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ struct yaffs_search_context *sc;
++ struct inode *inode = f->f_dentry->d_inode;
++ unsigned long offset, curoffs;
++ struct yaffs_obj *l;
++ int ret_val = 0;
++
++ char name[YAFFS_MAX_NAME_LENGTH + 1];
++
++ obj = yaffs_dentry_to_obj(f->f_dentry);
++ dev = obj->my_dev;
++
++ yaffs_gross_lock(dev);
++
++ yaffs_dev_to_lc(dev)->readdir_process = current;
++
++ offset = f->f_pos;
++
++ sc = yaffs_new_search(obj);
++ if (!sc) {
++ ret_val = -ENOMEM;
++ goto out;
++ }
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: starting at %d", (int)offset);
++
++ if (offset == 0) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry . ino %d",
++ (int)inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, ".", 1, offset, inode->i_ino, DT_DIR) < 0) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++ if (offset == 1) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: entry .. ino %d",
++ (int)f->f_dentry->d_parent->d_inode->i_ino);
++ yaffs_gross_unlock(dev);
++ if (filldir(dirent, "..", 2, offset,
++ f->f_dentry->d_parent->d_inode->i_ino,
++ DT_DIR) < 0) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++ yaffs_gross_lock(dev);
++ offset++;
++ f->f_pos++;
++ }
++
++ curoffs = 1;
++
++ /* If the directory has changed since the open or last call to
++ readdir, rewind to after the 2 canned entries. */
++ if (f->f_version != inode->i_version) {
++ offset = 2;
++ f->f_pos = offset;
++ f->f_version = inode->i_version;
++ }
++
++ while (sc->next_return) {
++ curoffs++;
++ l = sc->next_return;
++ if (curoffs >= offset) {
++ int this_inode = yaffs_get_obj_inode(l);
++ int this_type = yaffs_get_obj_type(l);
++
++ yaffs_get_obj_name(l, name, YAFFS_MAX_NAME_LENGTH + 1);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_readdir: %s inode %d",
++ name, yaffs_get_obj_inode(l));
++
++ yaffs_gross_unlock(dev);
++
++ if (filldir(dirent,
++ name,
++ strlen(name),
++ offset, this_inode, this_type) < 0) {
++ yaffs_gross_lock(dev);
++ goto out;
++ }
++
++ yaffs_gross_lock(dev);
++
++ offset++;
++ f->f_pos++;
++ }
++ yaffs_search_advance(sc);
++ }
++
++out:
++ yaffs_search_end(sc);
++ yaffs_dev_to_lc(dev)->readdir_process = NULL;
++ yaffs_gross_unlock(dev);
++
++ return ret_val;
++}
++#endif
++
++static const struct file_operations yaffs_dir_operations = {
++ .read = generic_read_dir,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
++ .iterate = yaffs_readdir,
++#else
++ .readdir = yaffs_readdir,
++#endif
++ .fsync = yaffs_sync_object,
++ .llseek = generic_file_llseek,
++};
++
++static void yaffs_fill_inode_from_obj(struct inode *inode,
++ struct yaffs_obj *obj)
++{
++ if (inode && obj) {
++
++ /* Check mode against the variant type and attempt to repair if broken. */
++ u32 mode = obj->yst_mode;
++ switch (obj->variant_type) {
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (!S_ISREG(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFREG;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ if (!S_ISLNK(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFLNK;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ if (!S_ISDIR(mode)) {
++ obj->yst_mode &= ~S_IFMT;
++ obj->yst_mode |= S_IFDIR;
++ }
++
++ break;
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ default:
++ /* TODO? */
++ break;
++ }
++
++ inode->i_flags |= S_NOATIME;
++
++ inode->i_ino = obj->obj_id;
++ inode->i_mode = obj->yst_mode;
++ i_uid_write(inode, obj->yst_uid);
++ i_gid_write(inode, obj->yst_gid);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++ inode->i_blksize = inode->i_sb->s_blocksize;
++#endif
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++
++ inode->i_rdev = old_decode_dev(obj->yst_rdev);
++ inode->i_atime.tv_sec = (time_t) (obj->yst_atime);
++ inode->i_atime.tv_nsec = 0;
++ inode->i_mtime.tv_sec = (time_t) obj->yst_mtime;
++ inode->i_mtime.tv_nsec = 0;
++ inode->i_ctime.tv_sec = (time_t) obj->yst_ctime;
++ inode->i_ctime.tv_nsec = 0;
++#else
++ inode->i_rdev = obj->yst_rdev;
++ inode->i_atime = obj->yst_atime;
++ inode->i_mtime = obj->yst_mtime;
++ inode->i_ctime = obj->yst_ctime;
++#endif
++ inode->i_size = yaffs_get_obj_length(obj);
++ inode->i_blocks = (inode->i_size + 511) >> 9;
++
++ set_nlink(inode, yaffs_get_obj_link_count(obj));
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_fill_inode mode %x uid %d gid %d size %lld count %d",
++ inode->i_mode, i_uid_read(inode), i_gid_read(inode),
++ inode->i_size, atomic_read(&inode->i_count));
++
++ switch (obj->yst_mode & S_IFMT) {
++ default: /* fifo, device or socket */
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ init_special_inode(inode, obj->yst_mode,
++ old_decode_dev(obj->yst_rdev));
++#else
++ init_special_inode(inode, obj->yst_mode,
++ (dev_t) (obj->yst_rdev));
++#endif
++ break;
++ case S_IFREG: /* file */
++ inode->i_op = &yaffs_file_inode_operations;
++ inode->i_fop = &yaffs_file_operations;
++ inode->i_mapping->a_ops =
++ &yaffs_file_address_operations;
++ break;
++ case S_IFDIR: /* directory */
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++ break;
++ case S_IFLNK: /* symlink */
++ inode->i_op = &yaffs_symlink_inode_operations;
++ break;
++ }
++
++ yaffs_inode_to_obj_lv(inode) = obj;
++
++ obj->my_inode = inode;
++
++ } else {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_fill_inode invalid parameters");
++ }
++
++}
++
++
++
++/*
++ * yaffs background thread functions.
++ * yaffs_bg_thread_fn() is the thread function.
++ * yaffs_bg_start() launches the background thread.
++ * yaffs_bg_stop() cleans up the background thread.
++ *
++ * NB:
++ * The thread should only run after yaffs is initialised.
++ * The thread should be stopped before yaffs is unmounted.
++ * The thread should not do any writing while the fs is read only.
++ */
++
++static unsigned yaffs_bg_gc_urgency(struct yaffs_dev *dev)
++{
++ unsigned erased_chunks =
++ dev->n_erased_blocks * dev->param.chunks_per_block;
++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++ unsigned scattered = 0; /* Free chunks not in an erased block */
++
++ if (erased_chunks < dev->n_free_chunks)
++ scattered = (dev->n_free_chunks - erased_chunks);
++
++ if (!context->bg_running)
++ return 0;
++ else if (scattered < (dev->param.chunks_per_block * 2))
++ return 0;
++ else if (erased_chunks > dev->n_free_chunks / 2)
++ return 0;
++ else if (erased_chunks > dev->n_free_chunks / 4)
++ return 1;
++ else
++ return 2;
++}
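++
++/* The urgency value above (0, 1 or 2) is consumed by the background thread
++ * below, which shortens its gc polling interval as urgency rises
++ * (roughly HZ*2, HZ/10 and HZ/20 jiffies respectively). */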
++
++#ifdef YAFFS_COMPILE_BACKGROUND
++
++void yaffs_background_waker(unsigned long data)
++{
++ wake_up_process((struct task_struct *)data);
++}
++
++static int yaffs_bg_thread_fn(void *data)
++{
++ struct yaffs_dev *dev = (struct yaffs_dev *)data;
++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++ unsigned long now = jiffies;
++ unsigned long next_dir_update = now;
++ unsigned long next_gc = now;
++ unsigned long expires;
++ unsigned int urgency;
++
++ int gc_result;
++ struct timer_list timer;
++
++ yaffs_trace(YAFFS_TRACE_BACKGROUND,
++ "yaffs_background starting for dev %p", (void *)dev);
++
++#ifdef YAFFS_COMPILE_FREEZER
++ set_freezable();
++#endif
++ while (context->bg_running) {
++ yaffs_trace(YAFFS_TRACE_BACKGROUND, "yaffs_background");
++
++ if (kthread_should_stop())
++ break;
++
++#ifdef YAFFS_COMPILE_FREEZER
++ if (try_to_freeze())
++ continue;
++#endif
++ yaffs_gross_lock(dev);
++
++ now = jiffies;
++
++ if (time_after(now, next_dir_update) && yaffs_bg_enable) {
++ yaffs_update_dirty_dirs(dev);
++ next_dir_update = now + HZ;
++ }
++
++ if (time_after(now, next_gc) && yaffs_bg_enable) {
++ if (!dev->is_checkpointed) {
++ urgency = yaffs_bg_gc_urgency(dev);
++ gc_result = yaffs_bg_gc(dev, urgency);
++ if (urgency > 1)
++ next_gc = now + HZ / 20 + 1;
++ else if (urgency > 0)
++ next_gc = now + HZ / 10 + 1;
++ else
++ next_gc = now + HZ * 2;
++ } else {
++ /*
++ * gc not running so set to next_dir_update
++ * to cut down on wake ups
++ */
++ next_gc = next_dir_update;
++ }
++ }
++ yaffs_gross_unlock(dev);
++#if 1
++ expires = next_dir_update;
++ if (time_before(next_gc, expires))
++ expires = next_gc;
++ if (time_before(expires, now))
++ expires = now + HZ;
++
++ Y_INIT_TIMER(&timer);
++ timer.expires = expires + 1;
++ timer.data = (unsigned long)current;
++ timer.function = yaffs_background_waker;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++ add_timer(&timer);
++ schedule();
++ del_timer_sync(&timer);
++#else
++ msleep(10);
++#endif
++ }
++
++ return 0;
++}
++
++static int yaffs_bg_start(struct yaffs_dev *dev)
++{
++ int retval = 0;
++ struct yaffs_linux_context *context = yaffs_dev_to_lc(dev);
++
++ if (dev->read_only)
++ return -1;
++
++ context->bg_running = 1;
++
++ context->bg_thread = kthread_run(yaffs_bg_thread_fn,
++ (void *)dev, "yaffs-bg-%d",
++ context->mount_id);
++
++ if (IS_ERR(context->bg_thread)) {
++ retval = PTR_ERR(context->bg_thread);
++ context->bg_thread = NULL;
++ context->bg_running = 0;
++ }
++ return retval;
++}
++
++static void yaffs_bg_stop(struct yaffs_dev *dev)
++{
++ struct yaffs_linux_context *ctxt = yaffs_dev_to_lc(dev);
++
++ ctxt->bg_running = 0;
++
++ if (ctxt->bg_thread) {
++ kthread_stop(ctxt->bg_thread);
++ ctxt->bg_thread = NULL;
++ }
++}
++#else
++static int yaffs_bg_thread_fn(void *data)
++{
++ return 0;
++}
++
++static int yaffs_bg_start(struct yaffs_dev *dev)
++{
++ return 0;
++}
++
++static void yaffs_bg_stop(struct yaffs_dev *dev)
++{
++}
++#endif
++
++
++static void yaffs_flush_inodes(struct super_block *sb)
++{
++ struct inode *iptr;
++ struct yaffs_obj *obj;
++
++ list_for_each_entry(iptr, &sb->s_inodes, i_sb_list) {
++ obj = yaffs_inode_to_obj(iptr);
++ if (obj) {
++ yaffs_trace(YAFFS_TRACE_OS,
++ "flushing obj %d",
++ obj->obj_id);
++ yaffs_flush_file(obj, 1, 0);
++ }
++ }
++}
++
++static void yaffs_flush_super(struct super_block *sb, int do_checkpoint)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++ if (!dev)
++ return;
++
++ yaffs_flush_inodes(sb);
++ yaffs_update_dirty_dirs(dev);
++ yaffs_flush_whole_cache(dev);
++ if (do_checkpoint)
++ yaffs_checkpoint_save(dev);
++}
++
++static LIST_HEAD(yaffs_context_list);
++struct mutex yaffs_context_lock;
++
++static void yaffs_put_super(struct super_block *sb)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
++ "yaffs_put_super");
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ "Shutting down yaffs background thread");
++ yaffs_bg_stop(dev);
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_BACKGROUND,
++ "yaffs background thread shut down");
++
++ yaffs_gross_lock(dev);
++
++ yaffs_flush_super(sb, 1);
++
++ yaffs_deinitialise(dev);
++
++ yaffs_gross_unlock(dev);
++
++ mutex_lock(&yaffs_context_lock);
++ list_del_init(&(yaffs_dev_to_lc(dev)->context_list));
++ mutex_unlock(&yaffs_context_lock);
++
++ if (yaffs_dev_to_lc(dev)->spare_buffer) {
++ kfree(yaffs_dev_to_lc(dev)->spare_buffer);
++ yaffs_dev_to_lc(dev)->spare_buffer = NULL;
++ }
++
++ kfree(dev);
++
++ yaffs_put_mtd_device(mtd);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_ALWAYS,
++ "yaffs_put_super done");
++}
++
++
++static unsigned yaffs_gc_control_callback(struct yaffs_dev *dev)
++{
++ return yaffs_gc_control;
++}
++
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++
++static struct inode *yaffs2_nfs_get_inode(struct super_block *sb, uint64_t ino,
++ uint32_t generation)
++{
++ return Y_IGET(sb, ino);
++}
++
++static struct dentry *yaffs2_fh_to_dentry(struct super_block *sb,
++ struct fid *fid, int fh_len,
++ int fh_type)
++{
++ return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
++ yaffs2_nfs_get_inode);
++}
++
++static struct dentry *yaffs2_fh_to_parent(struct super_block *sb,
++ struct fid *fid, int fh_len,
++ int fh_type)
++{
++ return generic_fh_to_parent(sb, fid, fh_len, fh_type,
++ yaffs2_nfs_get_inode);
++}
++
++struct dentry *yaffs2_get_parent(struct dentry *dentry)
++{
++
++ struct super_block *sb = dentry->d_inode->i_sb;
++ struct dentry *parent = ERR_PTR(-ENOENT);
++ struct inode *inode;
++ unsigned long parent_ino;
++ struct yaffs_obj *d_obj;
++ struct yaffs_obj *parent_obj;
++
++ d_obj = yaffs_inode_to_obj(dentry->d_inode);
++
++ if (d_obj) {
++ parent_obj = d_obj->parent;
++ if (parent_obj) {
++ parent_ino = yaffs_get_obj_inode(parent_obj);
++ inode = Y_IGET(sb, parent_ino);
++
++ if (IS_ERR(inode)) {
++ parent = ERR_CAST(inode);
++ } else {
++ /* d_obtain_alias() consumes the inode reference and
++ * returns an ERR_PTR itself on failure. */
++ parent = d_obtain_alias(inode);
++ }
++ }
++ }
++
++ return parent;
++}
++
++/* Only the overridden operations are declared; a NULL member means
++ * exportfs falls back to its default implementation.
++ */
++
++static struct export_operations yaffs_export_ops = {
++ .fh_to_dentry = yaffs2_fh_to_dentry,
++ .fh_to_parent = yaffs2_fh_to_parent,
++ .get_parent = yaffs2_get_parent,
++};
++
++#endif
++
++static void yaffs_unstitch_obj(struct inode *inode, struct yaffs_obj *obj)
++{
++ /* Clear the association between the inode and
++ * the struct yaffs_obj.
++ */
++ obj->my_inode = NULL;
++ yaffs_inode_to_obj_lv(inode) = NULL;
++
++ /* If the object freeing was deferred, then the real
++ * free happens now.
++ * This should fix the inode inconsistency problem.
++ */
++ yaffs_handle_defered_free(obj);
++}
++
++#ifdef YAFFS_HAS_EVICT_INODE
++/* yaffs_evict_inode combines into one operation what was previously done in
++ * yaffs_clear_inode() and yaffs_delete_inode()
++ *
++ */
++static void yaffs_evict_inode(struct inode *inode)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++ int deleteme = 0;
++
++ obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_evict_inode: ino %d, count %d %s",
++ (int)inode->i_ino, atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object");
++
++ if (!inode->i_nlink && !is_bad_inode(inode))
++ deleteme = 1;
++ truncate_inode_pages(&inode->i_data, 0);
++ Y_CLEAR_INODE(inode);
++
++ if (deleteme && obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode, obj);
++ yaffs_gross_unlock(dev);
++ }
++}
++#else
++
++/* clear is called to tell the fs to release any per-inode data it holds.
++ * The object might still exist on disk and is just being thrown out of the cache
++ * or else the object has actually been deleted and we're being called via
++ * the chain
++ * yaffs_delete_inode() -> clear_inode()->yaffs_clear_inode()
++ */
++
++static void yaffs_clear_inode(struct inode *inode)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_dev *dev;
++
++ obj = yaffs_inode_to_obj(inode);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_clear_inode: ino %d, count %d %s",
++ (int)inode->i_ino, atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object");
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_unstitch_obj(inode, obj);
++ yaffs_gross_unlock(dev);
++ }
++
++}
++
++/* delete is called when the link count is zero and the inode
++ * is put (ie. nobody wants to know about it anymore, time to
++ * delete the file).
++ * NB Must call clear_inode()
++ */
++static void yaffs_delete_inode(struct inode *inode)
++{
++ struct yaffs_obj *obj = yaffs_inode_to_obj(inode);
++ struct yaffs_dev *dev;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_delete_inode: ino %d, count %d %s",
++ (int)inode->i_ino, atomic_read(&inode->i_count),
++ obj ? "object exists" : "null object");
++
++ if (obj) {
++ dev = obj->my_dev;
++ yaffs_gross_lock(dev);
++ yaffs_del_obj(obj);
++ yaffs_gross_unlock(dev);
++ }
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 13))
++ truncate_inode_pages(&inode->i_data, 0);
++#endif
++ clear_inode(inode);
++}
++#endif
++
++
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++ struct yaffs_dev *dev = yaffs_dentry_to_obj(dentry)->my_dev;
++ struct super_block *sb = dentry->d_sb;
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++#else
++static int yaffs_statfs(struct super_block *sb, struct statfs *buf)
++{
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++#endif
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_statfs");
++
++ yaffs_gross_lock(dev);
++
++ buf->f_type = YAFFS_MAGIC;
++ buf->f_bsize = sb->s_blocksize;
++ buf->f_namelen = 255;
++
++ if (dev->data_bytes_per_chunk & (dev->data_bytes_per_chunk - 1)) {
++ /* Do this if chunk size is not a power of 2 */
++
++ uint64_t bytes_in_dev;
++ uint64_t bytes_free;
++
++ bytes_in_dev =
++ ((uint64_t)
++ ((dev->param.end_block - dev->param.start_block +
++ 1))) * ((uint64_t) (dev->param.chunks_per_block *
++ dev->data_bytes_per_chunk));
++
++ do_div(bytes_in_dev, sb->s_blocksize); /* bytes_in_dev becomes the number of blocks */
++ buf->f_blocks = bytes_in_dev;
++
++ bytes_free = ((uint64_t) (yaffs_get_n_free_chunks(dev))) *
++ ((uint64_t) (dev->data_bytes_per_chunk));
++
++ do_div(bytes_free, sb->s_blocksize);
++
++ buf->f_bfree = bytes_free;
++
++ } else if (sb->s_blocksize > dev->data_bytes_per_chunk) {
++
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) /
++ (sb->s_blocksize / dev->data_bytes_per_chunk);
++ } else {
++ buf->f_blocks =
++ (dev->param.end_block - dev->param.start_block + 1) *
++ dev->param.chunks_per_block *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++
++ buf->f_bfree =
++ yaffs_get_n_free_chunks(dev) *
++ (dev->data_bytes_per_chunk / sb->s_blocksize);
++ }
++
++ buf->f_files = 0;
++ buf->f_ffree = 0;
++ buf->f_bavail = buf->f_bfree;
++
++ yaffs_gross_unlock(dev);
++ return 0;
++}
++
++
++
++static int yaffs_do_sync_fs(struct super_block *sb, int request_checkpoint)
++{
++
++ struct yaffs_dev *dev = yaffs_super_to_dev(sb);
++ unsigned int oneshot_checkpoint = (yaffs_auto_checkpoint & 4);
++ unsigned gc_urgent = yaffs_bg_gc_urgency(dev);
++ int do_checkpoint;
++ int dirty = yaffs_check_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ "yaffs_do_sync_fs: gc-urgency %d %s %s%s",
++ gc_urgent,
++ dirty ? "dirty" : "clean",
++ request_checkpoint ? "checkpoint requested" : "no checkpoint",
++ oneshot_checkpoint ? " one-shot" : "");
++
++ yaffs_gross_lock(dev);
++ do_checkpoint = ((request_checkpoint && !gc_urgent) ||
++ oneshot_checkpoint) && !dev->is_checkpointed;
++
++ if (dirty || do_checkpoint) {
++ yaffs_flush_super(sb, !dev->is_checkpointed && do_checkpoint);
++ yaffs_clear_super_dirty(dev);
++ if (oneshot_checkpoint)
++ yaffs_auto_checkpoint &= ~4;
++ }
++ yaffs_gross_unlock(dev);
++
++ return 0;
++}
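++
++/* Note on yaffs_auto_checkpoint as used in this file: a value >= 1 requests
++ * a checkpoint from sync_fs, >= 2 also from write_super, and bit 2 (value 4)
++ * forces a one-shot checkpoint which is cleared once taken (see above). */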
++
++
++#ifdef YAFFS_HAS_WRITE_SUPER
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static void yaffs_write_super(struct super_block *sb)
++#else
++static int yaffs_write_super(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 2);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC | YAFFS_TRACE_BACKGROUND,
++ "yaffs_write_super %s",
++ request_checkpoint ? " checkpt" : "");
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ return 0;
++#endif
++}
++#endif
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_sync_fs(struct super_block *sb, int wait)
++#else
++static int yaffs_sync_fs(struct super_block *sb)
++#endif
++{
++ unsigned request_checkpoint = (yaffs_auto_checkpoint >= 1);
++
++ yaffs_trace(YAFFS_TRACE_OS | YAFFS_TRACE_SYNC,
++ "yaffs_sync_fs%s", request_checkpoint ? " checkpt" : "");
++
++ yaffs_do_sync_fs(sb, request_checkpoint);
++
++ return 0;
++}
++
++
++
++static const struct super_operations yaffs_super_ops = {
++ .statfs = yaffs_statfs,
++
++#ifndef YAFFS_USE_OWN_IGET
++ .read_inode = yaffs_read_inode,
++#endif
++#ifdef YAFFS_HAS_PUT_INODE
++ .put_inode = yaffs_put_inode,
++#endif
++ .put_super = yaffs_put_super,
++#ifdef YAFFS_HAS_EVICT_INODE
++ .evict_inode = yaffs_evict_inode,
++#else
++ .delete_inode = yaffs_delete_inode,
++ .clear_inode = yaffs_clear_inode,
++#endif
++ .sync_fs = yaffs_sync_fs,
++#ifdef YAFFS_HAS_WRITE_SUPER
++ .write_super = yaffs_write_super,
++#endif
++};
++
++struct yaffs_options {
++ int inband_tags;
++ int skip_checkpoint_read;
++ int skip_checkpoint_write;
++ int no_cache;
++ int tags_ecc_on;
++ int tags_ecc_overridden;
++ int lazy_loading_enabled;
++ int lazy_loading_overridden;
++ int empty_lost_and_found;
++ int empty_lost_and_found_overridden;
++ int disable_summary;
++};
++
++#define MAX_OPT_LEN 30
++static int yaffs_parse_options(struct yaffs_options *options,
++ const char *options_str)
++{
++ char cur_opt[MAX_OPT_LEN + 1];
++ int p;
++ int error = 0;
++
++ /* Parse through the options which is a comma-separated list */
++
++ while (options_str && *options_str && !error) {
++ memset(cur_opt, 0, MAX_OPT_LEN + 1);
++ p = 0;
++
++ while (*options_str == ',')
++ options_str++;
++
++ while (*options_str && *options_str != ',') {
++ if (p < MAX_OPT_LEN) {
++ cur_opt[p] = *options_str;
++ p++;
++ }
++ options_str++;
++ }
++
++ if (!strcmp(cur_opt, "inband-tags")) {
++ options->inband_tags = 1;
++ } else if (!strcmp(cur_opt, "tags-ecc-off")) {
++ options->tags_ecc_on = 0;
++ options->tags_ecc_overridden = 1;
++ } else if (!strcmp(cur_opt, "tags-ecc-on")) {
++ options->tags_ecc_on = 1;
++ options->tags_ecc_overridden = 1;
++ } else if (!strcmp(cur_opt, "lazy-loading-off")) {
++ options->lazy_loading_enabled = 0;
++ options->lazy_loading_overridden = 1;
++ } else if (!strcmp(cur_opt, "lazy-loading-on")) {
++ options->lazy_loading_enabled = 1;
++ options->lazy_loading_overridden = 1;
++ } else if (!strcmp(cur_opt, "disable-summary")) {
++ options->disable_summary = 1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-off")) {
++ options->empty_lost_and_found = 0;
++ options->empty_lost_and_found_overridden = 1;
++ } else if (!strcmp(cur_opt, "empty-lost-and-found-on")) {
++ options->empty_lost_and_found = 1;
++ options->empty_lost_and_found_overridden = 1;
++ } else if (!strcmp(cur_opt, "no-cache")) {
++ options->no_cache = 1;
++ } else if (!strcmp(cur_opt, "no-checkpoint-read")) {
++ options->skip_checkpoint_read = 1;
++ } else if (!strcmp(cur_opt, "no-checkpoint-write")) {
++ options->skip_checkpoint_write = 1;
++ } else if (!strcmp(cur_opt, "no-checkpoint")) {
++ options->skip_checkpoint_read = 1;
++ options->skip_checkpoint_write = 1;
++ } else {
++ printk(KERN_INFO "yaffs: Bad mount option \"%s\"\n",
++ cur_opt);
++ error = 1;
++ }
++ }
++
++ return error;
++}
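++
++/* Illustrative example (not an exhaustive list): mounting with
++ * "-o inband-tags,no-checkpoint" sets options->inband_tags and both
++ * skip_checkpoint flags parsed above. */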
++
++
++static struct dentry *yaffs_make_root(struct inode *inode)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
++ struct dentry *root = d_alloc_root(inode);
++
++ if (!root)
++ iput(inode);
++
++ return root;
++#else
++ return d_make_root(inode);
++#endif
++}
++
++
++
++
++static struct super_block *yaffs_internal_read_super(int yaffs_version,
++ struct super_block *sb,
++ void *data, int silent)
++{
++ int n_blocks;
++ struct inode *inode = NULL;
++ struct dentry *root;
++ struct yaffs_dev *dev = 0;
++ char devname_buf[BDEVNAME_SIZE + 1];
++ struct mtd_info *mtd;
++ int err;
++ char *data_str = (char *)data;
++ struct yaffs_linux_context *context = NULL;
++ struct yaffs_param *param;
++
++ int read_only = 0;
++ int inband_tags = 0;
++
++ struct yaffs_options options;
++
++ unsigned mount_id;
++ int found;
++ struct yaffs_linux_context *context_iterator;
++ struct list_head *l;
++
++ if (!sb) {
++ printk(KERN_INFO "yaffs: sb is NULL\n");
++ return NULL;
++ }
++
++ sb->s_magic = YAFFS_MAGIC;
++ sb->s_op = &yaffs_super_ops;
++ sb->s_flags |= MS_NOATIME;
++
++ read_only = ((sb->s_flags & MS_RDONLY) != 0);
++
++#ifdef YAFFS_COMPILE_EXPORTFS
++ sb->s_export_op = &yaffs_export_ops;
++#endif
++
++ if (!sb->s_dev)
++ printk(KERN_INFO "yaffs: sb->s_dev is NULL\n");
++ else if (!yaffs_devname(sb, devname_buf))
++ printk(KERN_INFO "yaffs: devname is NULL\n");
++ else
++ printk(KERN_INFO "yaffs: dev is %d name is \"%s\" %s\n",
++ sb->s_dev,
++ yaffs_devname(sb, devname_buf), read_only ? "ro" : "rw");
++
++ if (!data_str)
++ data_str = "";
++
++ printk(KERN_INFO "yaffs: passed flags \"%s\"\n", data_str);
++
++ memset(&options, 0, sizeof(options));
++
++ if (yaffs_parse_options(&options, data_str)) {
++ /* Option parsing failed */
++ return NULL;
++ }
++
++ sb->s_blocksize = PAGE_CACHE_SIZE;
++ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_super: Using yaffs%d", yaffs_version);
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_super: block size %d", (int)(sb->s_blocksize));
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: Attempting MTD mount of %u.%u,\"%s\"",
++ MAJOR(sb->s_dev), MINOR(sb->s_dev),
++ yaffs_devname(sb, devname_buf));
++
++ /* Get the device */
++ mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
++ if (IS_ERR(mtd)) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs: MTD device %u either not valid or unavailable",
++ MINOR(sb->s_dev));
++ return NULL;
++ }
++
++ if (yaffs_auto_select && yaffs_version == 1 && WRITE_SIZE(mtd) >= 2048) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs2");
++ yaffs_version = 2;
++ }
++
++ /* Added NCB 26/5/2006 for completeness */
++ if (yaffs_version == 2 && !options.inband_tags
++ && WRITE_SIZE(mtd) == 512) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "auto selecting yaffs1");
++ yaffs_version = 1;
++ }
++
++ if (mtd->oobavail < sizeof(struct yaffs_packed_tags2) ||
++ options.inband_tags)
++ inband_tags = 1;
++
++ if(yaffs_verify_mtd(mtd, yaffs_version, inband_tags) < 0)
++ return NULL;
++
++ /* OK, so if we got here, we have an MTD that's NAND and looks
++ * like it has the right capabilities
++ * Set the struct yaffs_dev up for mtd
++ */
++
++ if (!read_only && !(mtd->flags & MTD_WRITEABLE)) {
++ read_only = 1;
++ printk(KERN_INFO
++ "yaffs: mtd is read only, setting superblock read only\n"
++ );
++ sb->s_flags |= MS_RDONLY;
++ }
++
++ dev = kmalloc(sizeof(struct yaffs_dev), GFP_KERNEL);
++ context = kmalloc(sizeof(struct yaffs_linux_context), GFP_KERNEL);
++
++ if (!dev || !context) {
++ kfree(dev);
++ kfree(context);
++ dev = NULL;
++ context = NULL;
++
++ /* Deep shit could not allocate device structure */
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs_read_super: Failed trying to allocate struct yaffs_dev."
++ );
++ return NULL;
++ }
++ memset(dev, 0, sizeof(struct yaffs_dev));
++ param = &(dev->param);
++
++ memset(context, 0, sizeof(struct yaffs_linux_context));
++ dev->os_context = context;
++ INIT_LIST_HEAD(&(context->context_list));
++ context->dev = dev;
++ context->super = sb;
++
++ dev->read_only = read_only;
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++ sb->s_fs_info = dev;
++#else
++ sb->u.generic_sbp = dev;
++#endif
++
++
++ dev->driver_context = mtd;
++ param->name = mtd->name;
++
++ /* Set up the memory size parameters.... */
++
++
++ param->n_reserved_blocks = 5;
++ param->n_caches = (options.no_cache) ? 0 : 10;
++ param->inband_tags = inband_tags;
++
++ param->enable_xattr = 1;
++ if (options.lazy_loading_overridden)
++ param->disable_lazy_load = !options.lazy_loading_enabled;
++
++ param->defered_dir_update = 1;
++
++ if (options.tags_ecc_overridden)
++ param->no_tags_ecc = !options.tags_ecc_on;
++
++ param->empty_lost_n_found = 1;
++ param->refresh_period = 500;
++ param->disable_summary = options.disable_summary;
++
++
++#ifdef CONFIG_YAFFS_DISABLE_BAD_BLOCK_MARKING
++ param->disable_bad_block_marking = 1;
++#endif
++ if (options.empty_lost_and_found_overridden)
++ param->empty_lost_n_found = options.empty_lost_and_found;
++
++ /* ... and the functions. */
++ if (yaffs_version == 2) {
++ param->is_yaffs2 = 1;
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++ param->total_bytes_per_chunk = mtd->writesize;
++ param->chunks_per_block = mtd->erasesize / mtd->writesize;
++#else
++ param->total_bytes_per_chunk = mtd->oobblock;
++ param->chunks_per_block = mtd->erasesize / mtd->oobblock;
++#endif
++ n_blocks = YCALCBLOCKS(mtd->size, mtd->erasesize);
++
++ param->start_block = 0;
++ param->end_block = n_blocks - 1;
++ } else {
++ param->is_yaffs2 = 0;
++ n_blocks = YCALCBLOCKS(mtd->size,
++ YAFFS_CHUNKS_PER_BLOCK * YAFFS_BYTES_PER_CHUNK);
++
++ param->chunks_per_block = YAFFS_CHUNKS_PER_BLOCK;
++ param->total_bytes_per_chunk = YAFFS_BYTES_PER_CHUNK;
++ }
++
++ param->start_block = 0;
++ param->end_block = n_blocks - 1;
++
++ yaffs_mtd_drv_install(dev);
++
++ param->sb_dirty_fn = yaffs_set_super_dirty;
++ param->gc_control_fn = yaffs_gc_control_callback;
++
++ yaffs_dev_to_lc(dev)->super = sb;
++
++ param->use_nand_ecc = 1;
++
++ param->skip_checkpt_rd = options.skip_checkpoint_read;
++ param->skip_checkpt_wr = options.skip_checkpoint_write;
++
++ mutex_lock(&yaffs_context_lock);
++ /* Get a mount id */
++ found = 0;
++ for (mount_id = 0; !found; mount_id++) {
++ found = 1;
++ list_for_each(l, &yaffs_context_list) {
++ context_iterator =
++ list_entry(l, struct yaffs_linux_context,
++ context_list);
++ if (context_iterator->mount_id == mount_id)
++ found = 0;
++ }
++ }
++ context->mount_id = mount_id;
++
++ list_add_tail(&(yaffs_dev_to_lc(dev)->context_list),
++ &yaffs_context_list);
++ mutex_unlock(&yaffs_context_lock);
++
++ /* Directory search handling... */
++ INIT_LIST_HEAD(&(yaffs_dev_to_lc(dev)->search_contexts));
++ param->remove_obj_fn = yaffs_remove_obj_callback;
++
++ mutex_init(&(yaffs_dev_to_lc(dev)->gross_lock));
++
++ yaffs_gross_lock(dev);
++
++ err = yaffs_guts_initialise(dev);
++
++ yaffs_trace(YAFFS_TRACE_OS,
++ "yaffs_read_super: guts initialised %s",
++ (err == YAFFS_OK) ? "OK" : "FAILED");
++
++ if (err == YAFFS_OK)
++ yaffs_bg_start(dev);
++
++ if (!context->bg_thread)
++ param->defered_dir_update = 0;
++
++ sb->s_maxbytes = yaffs_max_file_size(dev);
++
++ /* Release lock before yaffs_get_inode() */
++ yaffs_gross_unlock(dev);
++
++ /* Create root inode */
++ if (err == YAFFS_OK)
++ inode = yaffs_get_inode(sb, S_IFDIR | 0755, 0, yaffs_root(dev));
++
++ if (!inode)
++ return NULL;
++
++ inode->i_op = &yaffs_dir_inode_operations;
++ inode->i_fop = &yaffs_dir_operations;
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: got root inode");
++
++ root = yaffs_make_root(inode);
++
++ if (!root)
++ return NULL;
++
++ sb->s_root = root;
++ if(!dev->is_checkpointed)
++ yaffs_set_super_dirty(dev);
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs_read_super: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ yaffs_trace(YAFFS_TRACE_OS, "yaffs_read_super: done");
++ return sb;
++}
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static struct dentry *yaffs_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *data)
++{
++ return mount_bdev(fs_type, flags, dev_name, data, yaffs_internal_read_super_mtd);
++}
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data, struct vfsmount *mnt)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs",
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++ .mount = yaffs_mount,
++#else
++ .get_sb = yaffs_read_super,
++#endif
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs_read_super(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(1, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs_fs_type, "yaffs", yaffs_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++static int yaffs2_internal_read_super_mtd(struct super_block *sb, void *data,
++ int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent) ? 0 : -EINVAL;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++static struct dentry *yaffs2_mount(struct file_system_type *fs_type, int flags,
++ const char *dev_name, void *data)
++{
++ return mount_bdev(fs_type, flags, dev_name, data, yaffs2_internal_read_super_mtd);
++}
++#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17))
++static int yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name, void *data,
++ struct vfsmount *mnt)
++{
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd, mnt);
++}
++#else
++static struct super_block *yaffs2_read_super(struct file_system_type *fs,
++ int flags, const char *dev_name,
++ void *data)
++{
++
++ return get_sb_bdev(fs, flags, dev_name, data,
++ yaffs2_internal_read_super_mtd);
++}
++#endif
++
++static struct file_system_type yaffs2_fs_type = {
++ .owner = THIS_MODULE,
++ .name = "yaffs2",
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
++ .mount = yaffs2_mount,
++#else
++ .get_sb = yaffs2_read_super,
++#endif
++ .kill_sb = kill_block_super,
++ .fs_flags = FS_REQUIRES_DEV,
++};
++#else
++static struct super_block *yaffs2_read_super(struct super_block *sb,
++ void *data, int silent)
++{
++ return yaffs_internal_read_super(2, sb, data, silent);
++}
++
++static DECLARE_FSTYPE(yaffs2_fs_type, "yaffs2", yaffs2_read_super,
++ FS_REQUIRES_DEV);
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++static struct proc_dir_entry *my_proc_entry;
++
++static char *yaffs_dump_dev_part0(char *buf, struct yaffs_dev *dev)
++{
++ struct yaffs_param *param = &dev->param;
++ int bs[10];
++
++ yaffs_count_blocks_by_state(dev,bs);
++
++ buf += sprintf(buf, "start_block.......... %d\n", param->start_block);
++ buf += sprintf(buf, "end_block............ %d\n", param->end_block);
++ buf += sprintf(buf, "total_bytes_per_chunk %d\n",
++ param->total_bytes_per_chunk);
++ buf += sprintf(buf, "use_nand_ecc......... %d\n", param->use_nand_ecc);
++ buf += sprintf(buf, "no_tags_ecc.......... %d\n", param->no_tags_ecc);
++ buf += sprintf(buf, "is_yaffs2............ %d\n", param->is_yaffs2);
++ buf += sprintf(buf, "inband_tags.......... %d\n", param->inband_tags);
++ buf += sprintf(buf, "empty_lost_n_found... %d\n",
++ param->empty_lost_n_found);
++ buf += sprintf(buf, "disable_lazy_load.... %d\n",
++ param->disable_lazy_load);
++ buf += sprintf(buf, "disable_bad_block_mrk %d\n",
++ param->disable_bad_block_marking);
++ buf += sprintf(buf, "refresh_period....... %d\n",
++ param->refresh_period);
++ buf += sprintf(buf, "n_caches............. %d\n", param->n_caches);
++ buf += sprintf(buf, "n_reserved_blocks.... %d\n",
++ param->n_reserved_blocks);
++ buf += sprintf(buf, "always_check_erased.. %d\n",
++ param->always_check_erased);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "block count by state\n");
++ buf += sprintf(buf, "0:%d 1:%d 2:%d 3:%d 4:%d\n",
++ bs[0], bs[1], bs[2], bs[3], bs[4]);
++ buf += sprintf(buf, "5:%d 6:%d 7:%d 8:%d 9:%d\n",
++ bs[5], bs[6], bs[7], bs[8], bs[9]);
++
++ return buf;
++}
++
++static char *yaffs_dump_dev_part1(char *buf, struct yaffs_dev *dev)
++{
++ buf += sprintf(buf, "max file size....... %lld\n",
++ (long long) yaffs_max_file_size(dev));
++ buf += sprintf(buf, "data_bytes_per_chunk. %d\n",
++ dev->data_bytes_per_chunk);
++ buf += sprintf(buf, "chunk_grp_bits....... %d\n", dev->chunk_grp_bits);
++ buf += sprintf(buf, "chunk_grp_size....... %d\n", dev->chunk_grp_size);
++ buf += sprintf(buf, "n_erased_blocks...... %d\n", dev->n_erased_blocks);
++ buf += sprintf(buf, "blocks_in_checkpt.... %d\n",
++ dev->blocks_in_checkpt);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_tnodes............. %d\n", dev->n_tnodes);
++ buf += sprintf(buf, "n_obj................ %d\n", dev->n_obj);
++ buf += sprintf(buf, "n_free_chunks........ %d\n", dev->n_free_chunks);
++ buf += sprintf(buf, "\n");
++ buf += sprintf(buf, "n_page_writes........ %u\n", dev->n_page_writes);
++ buf += sprintf(buf, "n_page_reads......... %u\n", dev->n_page_reads);
++ buf += sprintf(buf, "n_erasures........... %u\n", dev->n_erasures);
++ buf += sprintf(buf, "n_gc_copies.......... %u\n", dev->n_gc_copies);
++ buf += sprintf(buf, "all_gcs.............. %u\n", dev->all_gcs);
++ buf += sprintf(buf, "passive_gc_count..... %u\n",
++ dev->passive_gc_count);
++ buf += sprintf(buf, "oldest_dirty_gc_count %u\n",
++ dev->oldest_dirty_gc_count);
++ buf += sprintf(buf, "n_gc_blocks.......... %u\n", dev->n_gc_blocks);
++ buf += sprintf(buf, "bg_gcs............... %u\n", dev->bg_gcs);
++ buf += sprintf(buf, "n_retried_writes..... %u\n",
++ dev->n_retried_writes);
++ buf += sprintf(buf, "n_retired_blocks..... %u\n",
++ dev->n_retired_blocks);
++ buf += sprintf(buf, "n_ecc_fixed.......... %u\n", dev->n_ecc_fixed);
++ buf += sprintf(buf, "n_ecc_unfixed........ %u\n", dev->n_ecc_unfixed);
++ buf += sprintf(buf, "n_tags_ecc_fixed..... %u\n",
++ dev->n_tags_ecc_fixed);
++ buf += sprintf(buf, "n_tags_ecc_unfixed... %u\n",
++ dev->n_tags_ecc_unfixed);
++ buf += sprintf(buf, "cache_hits........... %u\n", dev->cache_hits);
++ buf += sprintf(buf, "n_deleted_files...... %u\n", dev->n_deleted_files);
++ buf += sprintf(buf, "n_unlinked_files..... %u\n",
++ dev->n_unlinked_files);
++ buf += sprintf(buf, "refresh_count........ %u\n", dev->refresh_count);
++ buf += sprintf(buf, "n_bg_deletions....... %u\n", dev->n_bg_deletions);
++ buf += sprintf(buf, "tags_used............ %u\n", dev->tags_used);
++ buf += sprintf(buf, "summary_used......... %u\n", dev->summary_used);
++
++ return buf;
++}
++
++static int yaffs_proc_read(char *page,
++ char **start,
++ off_t offset, int count, int *eof, void *data)
++{
++ struct list_head *item;
++ char *buf = page;
++ int step = offset;
++ int n = 0;
++
++ /* Get proc_file_read() to step 'offset' by one on each successive call.
++ * We use 'offset' (*ppos) to indicate where we are in dev_list.
++ * This also assumes the user has posted a read buffer large
++ * enough to hold the complete output; but that's life in /proc.
++ */
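++
++ /* Illustrative stepping (a sketch of the logic below): offset 0 prints
++ * the header, offset 1 a blank line, and from offset 2 onwards each pair
++ * of calls prints part 0 and part 1 of successive devices
++ * (2,3 -> first device, 4,5 -> second device, and so on).
++ */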
++
++ *(int *)start = 1;
++
++ /* Print header first */
++ if (step == 0)
++ buf +=
++ sprintf(buf,
++ "Multi-version YAFFS built:" __DATE__ " " __TIME__
++ "\n");
++ else if (step == 1)
++ buf += sprintf(buf, "\n");
++ else {
++ step -= 2;
++
++ mutex_lock(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ list_for_each(item, &yaffs_context_list) {
++ struct yaffs_linux_context *dc =
++ list_entry(item, struct yaffs_linux_context,
++ context_list);
++ struct yaffs_dev *dev = dc->dev;
++
++ if (n < (step & ~1)) {
++ n += 2;
++ continue;
++ }
++ if ((step & 1) == 0) {
++ buf +=
++ sprintf(buf, "\nDevice %d \"%s\"\n", n,
++ dev->param.name);
++ buf = yaffs_dump_dev_part0(buf, dev);
++ } else {
++ buf = yaffs_dump_dev_part1(buf, dev);
++ }
++
++ break;
++ }
++ mutex_unlock(&yaffs_context_lock);
++ }
++
++ return buf - page < count ? buf - page : count;
++}
++
++/**
++ * Set the verbosity of the warnings and error messages.
++ *
++ * Note that the names can only be a..z or _ with the current code.
++ */
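++/*
++ * Illustrative trace-mask strings this parser accepts, assuming the proc
++ * entry is exposed as /proc/yaffs (values are examples only):
++ *
++ *   echo "+os +mtd" > /proc/yaffs      add the os and mtd trace bits
++ *   echo "-scan" > /proc/yaffs         clear the scan trace bit
++ *   echo "=0x00000002" > /proc/yaffs   set the whole mask to a numeral
++ */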
++
++static struct {
++ char *mask_name;
++ unsigned mask_bitfield;
++} mask_flags[] = {
++ {"allocate", YAFFS_TRACE_ALLOCATE},
++ {"always", YAFFS_TRACE_ALWAYS},
++ {"background", YAFFS_TRACE_BACKGROUND},
++ {"bad_blocks", YAFFS_TRACE_BAD_BLOCKS},
++ {"buffers", YAFFS_TRACE_BUFFERS},
++ {"bug", YAFFS_TRACE_BUG},
++ {"checkpt", YAFFS_TRACE_CHECKPOINT},
++ {"deletion", YAFFS_TRACE_DELETION},
++ {"erase", YAFFS_TRACE_ERASE},
++ {"error", YAFFS_TRACE_ERROR},
++ {"gc_detail", YAFFS_TRACE_GC_DETAIL},
++ {"gc", YAFFS_TRACE_GC},
++ {"lock", YAFFS_TRACE_LOCK},
++ {"mtd", YAFFS_TRACE_MTD},
++ {"nandaccess", YAFFS_TRACE_NANDACCESS},
++ {"os", YAFFS_TRACE_OS},
++ {"scan_debug", YAFFS_TRACE_SCAN_DEBUG},
++ {"scan", YAFFS_TRACE_SCAN},
++ {"mount", YAFFS_TRACE_MOUNT},
++ {"tracing", YAFFS_TRACE_TRACING},
++ {"sync", YAFFS_TRACE_SYNC},
++ {"write", YAFFS_TRACE_WRITE},
++ {"verify", YAFFS_TRACE_VERIFY},
++ {"verify_nand", YAFFS_TRACE_VERIFY_NAND},
++ {"verify_full", YAFFS_TRACE_VERIFY_FULL},
++ {"verify_all", YAFFS_TRACE_VERIFY_ALL},
++ {"all", 0xffffffff},
++ {"none", 0},
++ {NULL, 0},
++};
++
++#define MAX_MASK_NAME_LENGTH 40
++static int yaffs_proc_write_trace_options(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ unsigned rg = 0, mask_bitfield;
++ char *end;
++ char *mask_name;
++ const char *x;
++ char substring[MAX_MASK_NAME_LENGTH + 1];
++ int i;
++ int done = 0;
++ int add, len = 0;
++ int pos = 0;
++
++ rg = yaffs_trace_mask;
++
++ while (!done && (pos < count)) {
++ done = 1;
++ while ((pos < count) && isspace(buf[pos]))
++ pos++;
++
++ switch (buf[pos]) {
++ case '+':
++ case '-':
++ case '=':
++ add = buf[pos];
++ pos++;
++ break;
++
++ default:
++ add = ' ';
++ break;
++ }
++ mask_name = NULL;
++
++ mask_bitfield = simple_strtoul(buf + pos, &end, 0);
++
++ if (end > buf + pos) {
++ mask_name = "numeral";
++ len = end - (buf + pos);
++ pos += len;
++ done = 0;
++ } else {
++ for (x = buf + pos, i = 0;
++ (*x == '_' || (*x >= 'a' && *x <= 'z')) &&
++ i < MAX_MASK_NAME_LENGTH; x++, i++, pos++)
++ substring[i] = *x;
++ substring[i] = '\0';
++
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ if (strcmp(substring, mask_flags[i].mask_name)
++ == 0) {
++ mask_name = mask_flags[i].mask_name;
++ mask_bitfield =
++ mask_flags[i].mask_bitfield;
++ done = 0;
++ break;
++ }
++ }
++ }
++
++ if (mask_name != NULL) {
++ done = 0;
++ switch (add) {
++ case '-':
++ rg &= ~mask_bitfield;
++ break;
++ case '+':
++ rg |= mask_bitfield;
++ break;
++ case '=':
++ rg = mask_bitfield;
++ break;
++ default:
++ rg |= mask_bitfield;
++ break;
++ }
++ }
++ }
++
++ yaffs_trace_mask = rg | YAFFS_TRACE_ALWAYS;
++
++ printk(KERN_DEBUG "new trace = 0x%08X\n", yaffs_trace_mask);
++
++ if (rg & YAFFS_TRACE_ALWAYS) {
++ for (i = 0; mask_flags[i].mask_name != NULL; i++) {
++ char flag;
++ flag = ((rg & mask_flags[i].mask_bitfield) ==
++ mask_flags[i].mask_bitfield) ? '+' : '-';
++ printk(KERN_DEBUG "%c%s\n", flag,
++ mask_flags[i].mask_name);
++ }
++ }
++
++ return count;
++}
++
++/* Debug strings are of the form:
++ * .bnnn print info on block n
++ * .cobjn,chunkn print nand chunk id for objn:chunkn
++ */
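++/*
++ * Illustrative debug writes, assuming the proc entry is exposed as
++ * /proc/yaffs (block and object numbers are examples only):
++ *
++ *   echo ".b100" > /proc/yaffs     print info on block 100
++ *   echo ".c20,3" > /proc/yaffs    print the NAND chunk backing obj 20, chunk 3
++ */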
++
++static int yaffs_proc_debug_write(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++
++ char str[100];
++ char *p0;
++ char *p1;
++ long p1_val;
++ long p0_val;
++ char cmd;
++ struct list_head *item;
++
++ memset(str, 0, sizeof(str));
++ memcpy(str, buf, min(count, sizeof(str) -1));
++
++ cmd = str[1];
++
++ p0 = str + 2;
++
++ p1 = p0;
++
++ while (*p1 && *p1 != ',') {
++ p1++;
++ }
++ *p1 = '\0';
++ p1++;
++
++ p0_val = simple_strtol(p0, NULL, 0);
++ p1_val = simple_strtol(p1, NULL, 0);
++
++
++ mutex_lock(&yaffs_context_lock);
++
++ /* Locate and print the Nth entry. Order N-squared but N is small. */
++ list_for_each(item, &yaffs_context_list) {
++ struct yaffs_linux_context *dc =
++ list_entry(item, struct yaffs_linux_context,
++ context_list);
++ struct yaffs_dev *dev = dc->dev;
++
++ if (cmd == 'b') {
++ struct yaffs_block_info *bi;
++
++ bi = yaffs_get_block_info(dev,p0_val);
++
++ if(bi) {
++ printk("Block %d: state %d, retire %d, use %d, seq %d\n",
++ (int)p0_val, bi->block_state,
++ bi->needs_retiring, bi->pages_in_use,
++ bi->seq_number);
++ }
++ } else if (cmd == 'c') {
++ struct yaffs_obj *obj;
++ int nand_chunk;
++
++ obj = yaffs_find_by_number(dev, p0_val);
++ if (!obj)
++ printk("No obj %d\n", (int)p0_val);
++ else {
++ if(p1_val == 0)
++ nand_chunk = obj->hdr_chunk;
++ else
++ nand_chunk =
++ yaffs_find_chunk_in_file(obj,
++ p1_val, NULL);
++ printk("Nand chunk for %d:%d is %d\n",
++ (int)p0_val, (int)p1_val, nand_chunk);
++ }
++ }
++ }
++
++ mutex_unlock(&yaffs_context_lock);
++
++ return count;
++}
++
++static int yaffs_proc_write(struct file *file, const char *buf,
++ unsigned long count, void *data)
++{
++ if (buf[0] == '.')
++ return yaffs_proc_debug_write(file, buf, count, data);
++ return yaffs_proc_write_trace_options(file, buf, count, data);
++}
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
++
++/* Stuff to handle installation of file systems */
++struct file_system_to_install {
++ struct file_system_type *fst;
++ int installed;
++};
++
++static struct file_system_to_install fs_to_install[] = {
++ {&yaffs_fs_type, 0},
++ {&yaffs2_fs_type, 0},
++ {NULL, 0}
++};
++
++static int __init init_yaffs_fs(void)
++{
++ int error = 0;
++ struct file_system_to_install *fsinst;
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs built " __DATE__ " " __TIME__ " Installing.");
++
++ mutex_init(&yaffs_context_lock);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++ /* Install the proc_fs entries */
++ my_proc_entry = create_proc_entry("yaffs",
++ S_IRUGO | S_IFREG, YPROC_ROOT);
++
++ if (my_proc_entry) {
++ my_proc_entry->write_proc = yaffs_proc_write;
++ my_proc_entry->read_proc = yaffs_proc_read;
++ my_proc_entry->data = NULL;
++ } else {
++ return -ENOMEM;
++ }
++#endif
++
++ /* Now add the file system entries */
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst && !error) {
++ error = register_filesystem(fsinst->fst);
++ if (!error)
++ fsinst->installed = 1;
++ fsinst++;
++ }
++
++ /* Any errors? uninstall */
++ if (error) {
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++ }
++
++ return error;
++}
++
++static void __exit exit_yaffs_fs(void)
++{
++
++ struct file_system_to_install *fsinst;
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "yaffs built " __DATE__ " " __TIME__ " removing.");
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++ remove_proc_entry("yaffs", YPROC_ROOT);
++#endif
++
++ fsinst = fs_to_install;
++
++ while (fsinst->fst) {
++ if (fsinst->installed) {
++ unregister_filesystem(fsinst->fst);
++ fsinst->installed = 0;
++ }
++ fsinst++;
++ }
++}
++
++module_init(init_yaffs_fs)
++ module_exit(exit_yaffs_fs)
++
++ MODULE_DESCRIPTION("YAFFS2 - a NAND specific flash file system");
++MODULE_AUTHOR("Charles Manning, Aleph One Ltd., 2002-2011");
++MODULE_LICENSE("GPL");
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs1.c linux-3.14.4/fs/yaffs2/yaffs_yaffs1.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs1.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_yaffs1.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,422 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_yaffs1.h"
++#include "yportenv.h"
++#include "yaffs_trace.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_nand.h"
++#include "yaffs_attribs.h"
++
++int yaffs1_scan(struct yaffs_dev *dev)
++{
++ struct yaffs_ext_tags tags;
++ int blk;
++ int result;
++ int chunk;
++ int c;
++ int deleted;
++ enum yaffs_block_state state;
++ LIST_HEAD(hard_list);
++ struct yaffs_block_info *bi;
++ u32 seq_number;
++ struct yaffs_obj_hdr *oh;
++ struct yaffs_obj *in;
++ struct yaffs_obj *parent;
++ int alloc_failed = 0;
++ struct yaffs_shadow_fixer *shadow_fixers = NULL;
++ u8 *chunk_data;
++
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "yaffs1_scan starts intstartblk %d intendblk %d...",
++ dev->internal_start_block, dev->internal_end_block);
++
++ chunk_data = yaffs_get_temp_buffer(dev);
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
++ blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
++
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
++ "Block scanning block %d state %d seq %d",
++ blk, state, seq_number);
++
++ if (state == YAFFS_BLOCK_STATE_DEAD) {
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++ "block %d is bad", blk);
++ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ }
++ bi++;
++ }
++
++ /* For each block.... */
++ for (blk = dev->internal_start_block;
++ !alloc_failed && blk <= dev->internal_end_block; blk++) {
++
++ cond_resched();
++
++ bi = yaffs_get_block_info(dev, blk);
++ state = bi->block_state;
++
++ deleted = 0;
++
++ /* For each chunk in each block that needs scanning.... */
++ for (c = 0;
++ !alloc_failed && c < dev->param.chunks_per_block &&
++ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
++ /* Read the tags and decide what to do */
++ chunk = blk * dev->param.chunks_per_block + c;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
++ &tags);
++
++ /* Let's have a good look at this chunk... */
++
++ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
++ tags.is_deleted) {
++ /* YAFFS1 only...
++ * A deleted chunk
++ */
++ deleted++;
++ dev->n_free_chunks++;
++ } else if (!tags.chunk_used) {
++ /* An unassigned chunk in the block
++ * This means that either the block is empty or
++ * this is the one being allocated from
++ */
++
++ if (c == 0) {
++ /* We're looking at the first chunk in
++ * the block so the block is unused */
++ state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ /* this is the block being allocated */
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Allocating from %d %d",
++ blk, c);
++ state = YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = c;
++ dev->alloc_block_finder = blk;
++
++ }
++
++ dev->n_free_chunks +=
++ (dev->param.chunks_per_block - c);
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ unsigned int endpos;
++
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ /* PutChunkIntoFile checks for a clash
++ * (two data chunks with the same chunk_id).
++ */
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in) {
++ if (!yaffs_put_chunk_in_file
++ (in, tags.chunk_id, chunk, 1))
++ alloc_failed = 1;
++ }
++
++ endpos =
++ (tags.chunk_id - 1) *
++ dev->data_bytes_per_chunk +
++ tags.n_bytes;
++ if (in &&
++ in->variant_type ==
++ YAFFS_OBJECT_TYPE_FILE &&
++ in->variant.file_variant.scanned_size <
++ endpos) {
++ in->variant.file_variant.scanned_size =
++ endpos;
++ if (!dev->param.use_header_file_size) {
++ in->variant.
++ file_variant.file_size =
++ in->variant.
++ file_variant.scanned_size;
++ }
++
++ }
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Make the object
++ */
++ yaffs_set_chunk_bit(dev, blk, c);
++ bi->pages_in_use++;
++
++ result = yaffs_rd_chunk_tags_nand(dev, chunk,
++ chunk_data,
++ NULL);
++
++ oh = (struct yaffs_obj_hdr *)chunk_data;
++
++ in = yaffs_find_by_number(dev, tags.obj_id);
++ if (in && in->variant_type != oh->type) {
++ /* This should not happen, but somehow
++ * We've ended up with an obj_id that
++ * has been reused but not yet deleted,
++ * and worse still it has changed type.
++ * Delete the old object.
++ */
++
++ yaffs_del_obj(in);
++ in = NULL;
++ }
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ oh->type);
++
++ if (!in)
++ alloc_failed = 1;
++
++ if (in && oh->shadows_obj > 0) {
++
++ struct yaffs_shadow_fixer *fixer;
++ fixer =
++ kmalloc(sizeof
++ (struct yaffs_shadow_fixer),
++ GFP_NOFS);
++ if (fixer) {
++ fixer->next = shadow_fixers;
++ shadow_fixers = fixer;
++ fixer->obj_id = tags.obj_id;
++ fixer->shadowed_id =
++ oh->shadows_obj;
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Shadow fixer: %d shadows %d",
++ fixer->obj_id,
++ fixer->shadowed_id);
++
++ }
++
++ }
++
++ if (in && in->valid) {
++ /* We have already filled this one.
++ * We have a duplicate and need to
++ * resolve it. */
++
++ unsigned existing_serial = in->serial;
++ unsigned new_serial =
++ tags.serial_number;
++
++ if (((existing_serial + 1) & 3) ==
++ new_serial) {
++ /* Use new one - destroy the
++ * existing one */
++ yaffs_chunk_del(dev,
++ in->hdr_chunk,
++ 1, __LINE__);
++ in->valid = 0;
++ } else {
++ /* Use existing - destroy
++ * this one. */
++ yaffs_chunk_del(dev, chunk, 1,
++ __LINE__);
++ }
++ }
++
++ if (in && !in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id ==
++ YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle
++ * with directory structure */
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ } else if (in && !in->valid) {
++ /* we need to load this info */
++
++ in->valid = 1;
++ in->variant_type = oh->type;
++
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ in->hdr_chunk = chunk;
++ in->serial = tags.serial_number;
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ in->dirty = 0;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ parent =
++ yaffs_find_or_create_by_number
++ (dev, oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ if (!parent)
++ alloc_failed = 1;
++ if (parent && parent->variant_type ==
++ YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ INIT_LIST_HEAD(&parent->
++ variant.dir_variant.
++ children);
++ } else if (!parent ||
++ parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, a problem....
++ * We're trying to use a
++ * non-directory as a directory
++ */
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ );
++ parent = dev->lost_n_found;
++ }
++
++ yaffs_add_obj_to_dir(parent, in);
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ if (dev->param.
++ use_header_file_size)
++ in->variant.
++ file_variant.file_size
++ = yaffs_oh_to_size(oh);
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ in->variant.
++ hardlink_variant.equiv_id =
++ oh->equiv_id;
++ list_add(&in->hard_links,
++ &hard_list);
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ in->variant.symlink_variant.
++ alias =
++ yaffs_clone_str(oh->alias);
++ if (!in->variant.
++ symlink_variant.alias)
++ alloc_failed = 1;
++ break;
++ }
++ }
++ }
++ }
++
++ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ /* If we got this far while scanning,
++ * then the block is fully allocated. */
++ state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ /* If the block was partially allocated then
++ * treat it as fully allocated. */
++ state = YAFFS_BLOCK_STATE_FULL;
++ dev->alloc_block = -1;
++ }
++
++ bi->block_state = state;
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL)
++ yaffs_block_became_dirty(dev, blk);
++ }
++
++ /* Ok, we've done all the scanning.
++ * Fix up the hard link chains.
++ * We should now have scanned all the objects, now it's time to add
++ * these hardlinks.
++ */
++
++ yaffs_link_fixup(dev, &hard_list);
++
++ /*
++ * Fix up any shadowed objects.
++ * There should not be more than one of these.
++ */
++ {
++ struct yaffs_shadow_fixer *fixer;
++ struct yaffs_obj *obj;
++
++ while (shadow_fixers) {
++ fixer = shadow_fixers;
++ shadow_fixers = fixer->next;
++ /* Complete the rename transaction by deleting the
++ * shadowed object then setting the object header
++ * to unshadowed.
++ */
++ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
++ if (obj)
++ yaffs_del_obj(obj);
++
++ obj = yaffs_find_by_number(dev, fixer->obj_id);
++
++ if (obj)
++ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
++
++ kfree(fixer);
++ }
++ }
++
++ yaffs_release_temp_buffer(dev, chunk_data);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
++
++ return YAFFS_OK;
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs1.h linux-3.14.4/fs/yaffs2/yaffs_yaffs1.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs1.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_yaffs1.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,22 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS1_H__
++#define __YAFFS_YAFFS1_H__
++
++#include "yaffs_guts.h"
++int yaffs1_scan(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs2.c linux-3.14.4/fs/yaffs2/yaffs_yaffs2.c
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs2.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_yaffs2.c 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,1534 @@
++/*
++ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include "yaffs_guts.h"
++#include "yaffs_trace.h"
++#include "yaffs_yaffs2.h"
++#include "yaffs_checkptrw.h"
++#include "yaffs_bitmap.h"
++#include "yaffs_nand.h"
++#include "yaffs_getblockinfo.h"
++#include "yaffs_verify.h"
++#include "yaffs_attribs.h"
++#include "yaffs_summary.h"
++
++/*
++ * Checkpoints are really no benefit on very small partitions.
++ *
++ * To save space on small partitions don't bother with checkpoints unless
++ * the partition is at least this big.
++ */
++#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
++#define YAFFS_SMALL_HOLE_THRESHOLD 4
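++
++/*
++ * Rough illustration (hypothetical geometry): with 128 KiB erase blocks,
++ * 60 blocks is about 7.5 MiB, so partitions smaller than that never
++ * carry a checkpoint.
++ */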
++
++/*
++ * Oldest Dirty Sequence Number handling.
++ */
++
++/* yaffs_calc_oldest_dirty_seq()
++ * yaffs2_find_oldest_dirty_seq()
++ * Calculate the oldest dirty sequence number if we don't know it.
++ */
++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
++{
++ int i;
++ unsigned seq;
++ unsigned block_no = 0;
++ struct yaffs_block_info *b;
++
++ if (!dev->param.is_yaffs2)
++ return;
++
++ /* Find the oldest dirty sequence number. */
++ seq = dev->seq_number + 1;
++ b = dev->block_info;
++ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
++ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
++ (b->pages_in_use - b->soft_del_pages) <
++ dev->param.chunks_per_block &&
++ b->seq_number < seq) {
++ seq = b->seq_number;
++ block_no = i;
++ }
++ b++;
++ }
++
++ if (block_no) {
++ dev->oldest_dirty_seq = seq;
++ dev->oldest_dirty_block = block_no;
++ }
++}
++
++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
++{
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (!dev->oldest_dirty_seq)
++ yaffs_calc_oldest_dirty_seq(dev);
++}
++
++/*
++ * yaffs_clear_oldest_dirty_seq()
++ * Called when a block is erased or marked bad. (ie. when its seq_number
++ * becomes invalid). If the value matches the oldest then we clear
++ * dev->oldest_dirty_seq to force its recomputation.
++ */
++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi)
++{
++
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
++ dev->oldest_dirty_seq = 0;
++ dev->oldest_dirty_block = 0;
++ }
++}
++
++/*
++ * yaffs2_update_oldest_dirty_seq()
++ * Update the oldest dirty sequence number whenever we dirty a block.
++ * Only do this if the oldest_dirty_seq is actually being tracked.
++ */
++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
++ struct yaffs_block_info *bi)
++{
++ if (!dev->param.is_yaffs2)
++ return;
++
++ if (dev->oldest_dirty_seq) {
++ if (dev->oldest_dirty_seq > bi->seq_number) {
++ dev->oldest_dirty_seq = bi->seq_number;
++ dev->oldest_dirty_block = block_no;
++ }
++ }
++}
++
++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
++{
++
++ if (!dev->param.is_yaffs2)
++ return 1; /* disqualification only applies to yaffs2. */
++
++ if (!bi->has_shrink_hdr)
++ return 1; /* can gc */
++
++ yaffs2_find_oldest_dirty_seq(dev);
++
++ /* Can't do gc of this block if there are any blocks older than this
++ * one that have discarded pages.
++ */
++ return (bi->seq_number <= dev->oldest_dirty_seq);
++}
++
++/*
++ * yaffs2_find_refresh_block()
++ * periodically finds the oldest full block by sequence number for refreshing.
++ * Only for yaffs2.
++ */
++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
++{
++ u32 b;
++ u32 oldest = 0;
++ u32 oldest_seq = 0;
++ struct yaffs_block_info *bi;
++
++ if (!dev->param.is_yaffs2)
++ return oldest;
++
++ /*
++ * If refresh period < 10 then refreshing is disabled.
++ */
++ if (dev->param.refresh_period < 10)
++ return oldest;
++
++ /*
++ * Fix broken values.
++ */
++ if (dev->refresh_skip > dev->param.refresh_period)
++ dev->refresh_skip = dev->param.refresh_period;
++
++ if (dev->refresh_skip > 0)
++ return oldest;
++
++ /*
++ * Refresh skip is now zero.
++ * We'll do a refresh this time around....
++ * Update the refresh skip and find the oldest block.
++ */
++ dev->refresh_skip = dev->param.refresh_period;
++ dev->refresh_count++;
++ bi = dev->block_info;
++ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++
++ if (oldest < 1 || bi->seq_number < oldest_seq) {
++ oldest = b;
++ oldest_seq = bi->seq_number;
++ }
++ }
++ bi++;
++ }
++
++ if (oldest > 0) {
++ yaffs_trace(YAFFS_TRACE_GC,
++ "GC refresh count %d selected block %d with seq_number %d",
++ dev->refresh_count, oldest, oldest_seq);
++ }
++
++ return oldest;
++}
++
++int yaffs2_checkpt_required(struct yaffs_dev *dev)
++{
++ int nblocks;
++
++ if (!dev->param.is_yaffs2)
++ return 0;
++
++ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
++
++ return !dev->param.skip_checkpt_wr &&
++ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
++}
++
++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
++{
++ int retval;
++ int n_bytes = 0;
++ int n_blocks;
++ int dev_blocks;
++
++ if (!dev->param.is_yaffs2)
++ return 0;
++
++ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
++ /* Not a valid value so recalculate */
++ dev_blocks = dev->param.end_block - dev->param.start_block + 1;
++ n_bytes += sizeof(struct yaffs_checkpt_validity);
++ n_bytes += sizeof(struct yaffs_checkpt_dev);
++ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
++ n_bytes += dev_blocks * dev->chunk_bit_stride;
++ n_bytes +=
++ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
++ dev->n_obj;
++ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
++ n_bytes += sizeof(struct yaffs_checkpt_validity);
++ n_bytes += sizeof(u32); /* checksum */
++
++ /* Round up and add 2 blocks to allow for some bad blocks,
++ * so add 3 */
++
++ n_blocks =
++ (n_bytes /
++ (dev->data_bytes_per_chunk *
++ dev->param.chunks_per_block)) + 3;
++
++ dev->checkpoint_blocks_required = n_blocks;
++ }
++
++ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
++ if (retval < 0)
++ retval = 0;
++ return retval;
++}
++
++/*--------------------- Checkpointing --------------------*/
++
++static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
++{
++ struct yaffs_checkpt_validity cp;
++
++ memset(&cp, 0, sizeof(cp));
++
++ cp.struct_type = sizeof(cp);
++ cp.magic = YAFFS_MAGIC;
++ cp.version = YAFFS_CHECKPOINT_VERSION;
++ cp.head = (head) ? 1 : 0;
++
++ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
++{
++ struct yaffs_checkpt_validity cp;
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ if (ok)
++ ok = (cp.struct_type == sizeof(cp)) &&
++ (cp.magic == YAFFS_MAGIC) &&
++ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
++ (cp.head == ((head) ? 1 : 0));
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
++ struct yaffs_dev *dev)
++{
++ cp->n_erased_blocks = dev->n_erased_blocks;
++ cp->alloc_block = dev->alloc_block;
++ cp->alloc_page = dev->alloc_page;
++ cp->n_free_chunks = dev->n_free_chunks;
++
++ cp->n_deleted_files = dev->n_deleted_files;
++ cp->n_unlinked_files = dev->n_unlinked_files;
++ cp->n_bg_deletions = dev->n_bg_deletions;
++ cp->seq_number = dev->seq_number;
++
++}
++
++static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
++ struct yaffs_checkpt_dev *cp)
++{
++ dev->n_erased_blocks = cp->n_erased_blocks;
++ dev->alloc_block = cp->alloc_block;
++ dev->alloc_page = cp->alloc_page;
++ dev->n_free_chunks = cp->n_free_chunks;
++
++ dev->n_deleted_files = cp->n_deleted_files;
++ dev->n_unlinked_files = cp->n_unlinked_files;
++ dev->n_bg_deletions = cp->n_bg_deletions;
++ dev->seq_number = cp->seq_number;
++}
++
++static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_dev cp;
++ u32 n_bytes;
++ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++ int ok;
++
++ /* Write device runtime values */
++ yaffs2_dev_to_checkpt_dev(&cp, dev);
++ cp.struct_type = sizeof(cp);
++
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
++ return 0;
++
++ /* Write block info */
++ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
++ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
++ if (!ok)
++ return 0;
++
++ /* Write chunk bits */
++ n_bytes = n_blocks * dev->chunk_bit_stride;
++ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
++{
++ struct yaffs_checkpt_dev cp;
++ u32 n_bytes;
++ u32 n_blocks =
++ (dev->internal_end_block - dev->internal_start_block + 1);
++ int ok;
++
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (!ok)
++ return 0;
++
++ if (cp.struct_type != sizeof(cp))
++ return 0;
++
++ yaffs_checkpt_dev_to_dev(dev, &cp);
++
++ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
++
++ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
++
++ if (!ok)
++ return 0;
++
++ n_bytes = n_blocks * dev->chunk_bit_stride;
++
++ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
++
++ return ok ? 1 : 0;
++}
++
++static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
++ struct yaffs_obj *obj)
++{
++ cp->obj_id = obj->obj_id;
++ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
++ cp->hdr_chunk = obj->hdr_chunk;
++ cp->variant_type = obj->variant_type;
++ cp->deleted = obj->deleted;
++ cp->soft_del = obj->soft_del;
++ cp->unlinked = obj->unlinked;
++ cp->fake = obj->fake;
++ cp->rename_allowed = obj->rename_allowed;
++ cp->unlink_allowed = obj->unlink_allowed;
++ cp->serial = obj->serial;
++ cp->n_data_chunks = obj->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
++}
++
++static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
++ struct yaffs_checkpt_obj *cp)
++{
++ struct yaffs_obj *parent;
++
++ if (obj->variant_type != cp->variant_type) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
++ cp->obj_id, cp->variant_type, cp->hdr_chunk,
++ obj->variant_type);
++ return 0;
++ }
++
++ obj->obj_id = cp->obj_id;
++
++ if (cp->parent_id)
++ parent = yaffs_find_or_create_by_number(obj->my_dev,
++ cp->parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ else
++ parent = NULL;
++
++ if (parent) {
++ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
++ yaffs_trace(YAFFS_TRACE_ALWAYS,
++ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
++ cp->obj_id, cp->parent_id,
++ cp->variant_type, cp->hdr_chunk,
++ parent->variant_type);
++ return 0;
++ }
++ yaffs_add_obj_to_dir(parent, obj);
++ }
++
++ obj->hdr_chunk = cp->hdr_chunk;
++ obj->variant_type = cp->variant_type;
++ obj->deleted = cp->deleted;
++ obj->soft_del = cp->soft_del;
++ obj->unlinked = cp->unlinked;
++ obj->fake = cp->fake;
++ obj->rename_allowed = cp->rename_allowed;
++ obj->unlink_allowed = cp->unlink_allowed;
++ obj->serial = cp->serial;
++ obj->n_data_chunks = cp->n_data_chunks;
++
++ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
++ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
++ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
++ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
++
++ if (obj->hdr_chunk > 0)
++ obj->lazy_loaded = 1;
++ return 1;
++}
++
++static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
++ struct yaffs_tnode *tn, u32 level,
++ int chunk_offset)
++{
++ int i;
++ struct yaffs_dev *dev = in->my_dev;
++ int ok = 1;
++ u32 base_offset;
++
++ if (!tn)
++ return 1;
++
++ if (level > 0) {
++ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
++ if (!tn->internal[i])
++ continue;
++ ok = yaffs2_checkpt_tnode_worker(in,
++ tn->internal[i],
++ level - 1,
++ (chunk_offset <<
++ YAFFS_TNODES_INTERNAL_BITS) + i);
++ }
++ return ok;
++ }
++
++ /* Level 0 tnode */
++ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
++ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
++ sizeof(base_offset));
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
++ dev->tnode_size);
++
++ return ok;
++}
++
++static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
++{
++ u32 end_marker = ~0;
++ int ok = 1;
++
++ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return ok;
++
++ ok = yaffs2_checkpt_tnode_worker(obj,
++ obj->variant.file_variant.top,
++ obj->variant.file_variant.
++ top_level, 0);
++ if (ok)
++ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
++ sizeof(end_marker)) == sizeof(end_marker));
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
++{
++ u32 base_chunk;
++ int ok = 1;
++ struct yaffs_dev *dev = obj->my_dev;
++ struct yaffs_file_var *file_stuct_ptr = &obj->variant.file_variant;
++ struct yaffs_tnode *tn;
++ int nread = 0;
++
++ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
++ sizeof(base_chunk));
++
++ while (ok && (~base_chunk)) {
++ nread++;
++ /* Read level 0 tnode */
++
++ tn = yaffs_get_tnode(dev);
++ if (tn)
++ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
++ dev->tnode_size);
++ else
++ ok = 0;
++
++ if (tn && ok)
++ ok = yaffs_add_find_tnode_0(dev,
++ file_stuct_ptr,
++ base_chunk, tn) ? 1 : 0;
++
++ if (ok)
++ ok = (yaffs2_checkpt_rd
++ (dev, &base_chunk,
++ sizeof(base_chunk)) == sizeof(base_chunk));
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "Checkpoint read tnodes %d records, last %d. ok %d",
++ nread, base_chunk, ok);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_checkpt_obj cp;
++ int i;
++ int ok = 1;
++ struct list_head *lh;
++
++ /* Iterate through the objects in each hash entry,
++ * dumping them to the checkpointing stream.
++ */
++
++ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
++ list_for_each(lh, &dev->obj_bucket[i].list) {
++ obj = list_entry(lh, struct yaffs_obj, hash_link);
++ if (!obj->defered_free) {
++ yaffs2_obj_checkpt_obj(&cp, obj);
++ cp.struct_type = sizeof(cp);
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
++ cp.obj_id, cp.parent_id,
++ cp.variant_type, cp.hdr_chunk, obj);
++
++ ok = (yaffs2_checkpt_wr(dev, &cp,
++ sizeof(cp)) == sizeof(cp));
++
++ if (ok &&
++ obj->variant_type ==
++ YAFFS_OBJECT_TYPE_FILE)
++ ok = yaffs2_wr_checkpt_tnodes(obj);
++ }
++ }
++ }
++
++ /* Dump end of list */
++ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
++ cp.struct_type = sizeof(cp);
++
++ if (ok)
++ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
++{
++ struct yaffs_obj *obj;
++ struct yaffs_checkpt_obj cp;
++ int ok = 1;
++ int done = 0;
++ LIST_HEAD(hard_list);
++
++
++ while (ok && !done) {
++ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
++ if (cp.struct_type != sizeof(cp)) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "struct size %d instead of %d ok %d",
++ cp.struct_type, (int)sizeof(cp), ok);
++ ok = 0;
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "Checkpoint read object %d parent %d type %d chunk %d ",
++ cp.obj_id, cp.parent_id, cp.variant_type,
++ cp.hdr_chunk);
++
++ if (ok && cp.obj_id == ~0) {
++ done = 1;
++ } else if (ok) {
++ obj =
++ yaffs_find_or_create_by_number(dev, cp.obj_id,
++ cp.variant_type);
++ if (obj) {
++ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
++ if (!ok)
++ break;
++ if (obj->variant_type ==
++ YAFFS_OBJECT_TYPE_FILE) {
++ ok = yaffs2_rd_checkpt_tnodes(obj);
++ } else if (obj->variant_type ==
++ YAFFS_OBJECT_TYPE_HARDLINK) {
++ list_add(&obj->hard_links, &hard_list);
++ }
++ } else {
++ ok = 0;
++ }
++ }
++ }
++
++ if (ok)
++ yaffs_link_fixup(dev, &hard_list);
++
++ return ok ? 1 : 0;
++}
++
++static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
++{
++ u32 checkpt_sum;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
++
++ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
++ sizeof(checkpt_sum));
++
++ if (!ok)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
++{
++ u32 checkpt_sum0;
++ u32 checkpt_sum1;
++ int ok;
++
++ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
++
++ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
++ sizeof(checkpt_sum1));
++
++ if (!ok)
++ return 0;
++
++ if (checkpt_sum0 != checkpt_sum1)
++ return 0;
++
++ return 1;
++}
++
++static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
++{
++ int ok = 1;
++
++ if (!yaffs2_checkpt_required(dev)) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "skipping checkpoint write");
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 1);
++
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint validity");
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint device");
++ ok = yaffs2_wr_checkpt_dev(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint objects");
++ ok = yaffs2_wr_checkpt_objs(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "write checkpoint validity");
++ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok)
++ ok = yaffs2_wr_checkpt_sum(dev);
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return dev->is_checkpointed;
++}
++
++static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
++{
++ int ok = 1;
++
++ if (!dev->param.is_yaffs2)
++ ok = 0;
++
++ if (ok && dev->param.skip_checkpt_rd) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "skipping checkpoint read");
++ ok = 0;
++ }
++
++ if (ok)
++ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
++
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint validity");
++ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint device");
++ ok = yaffs2_rd_checkpt_dev(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint objects");
++ ok = yaffs2_rd_checkpt_objs(dev);
++ }
++ if (ok) {
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint validity");
++ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
++ }
++
++ if (ok) {
++ ok = yaffs2_rd_checkpt_sum(dev);
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "read checkpoint checksum %d", ok);
++ }
++
++ if (!yaffs_checkpt_close(dev))
++ ok = 0;
++
++ if (ok)
++ dev->is_checkpointed = 1;
++ else
++ dev->is_checkpointed = 0;
++
++ return ok ? 1 : 0;
++}
++
++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
++{
++ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
++ dev->is_checkpointed = 0;
++ yaffs2_checkpt_invalidate_stream(dev);
++ }
++ if (dev->param.sb_dirty_fn)
++ dev->param.sb_dirty_fn(dev);
++}
++
++int yaffs_checkpoint_save(struct yaffs_dev *dev)
++{
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "save entry: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++
++ if (!dev->is_checkpointed) {
++ yaffs2_checkpt_invalidate(dev);
++ yaffs2_wr_checkpt_data(dev);
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
++ "save exit: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ return dev->is_checkpointed;
++}
++
++int yaffs2_checkpt_restore(struct yaffs_dev *dev)
++{
++ int retval;
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "restore entry: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ retval = yaffs2_rd_checkpt_data(dev);
++
++ if (dev->is_checkpointed) {
++ yaffs_verify_objects(dev);
++ yaffs_verify_blocks(dev);
++ yaffs_verify_free_chunks(dev);
++ }
++
++ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
++ "restore exit: is_checkpointed %d",
++ dev->is_checkpointed);
++
++ return retval;
++}
++
++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
++{
++ /* if new_size > old_file_size.
++ * We're going to be writing a hole.
++ * If the hole is small then write zeros otherwise write a start
++ * of hole marker.
++ */
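++ /* For example (hypothetical geometry): with 2 KiB data chunks and
++ * YAFFS_SMALL_HOLE_THRESHOLD of 4, a hole of under 8 KiB is typically
++ * zero-filled in place, while a larger hole is recorded as a
++ * start-of-hole header carrying the old file size.
++ */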
++ loff_t old_file_size;
++ loff_t increase;
++ int small_hole;
++ int result = YAFFS_OK;
++ struct yaffs_dev *dev = NULL;
++ u8 *local_buffer = NULL;
++ int small_increase_ok = 0;
++
++ if (!obj)
++ return YAFFS_FAIL;
++
++ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
++ return YAFFS_FAIL;
++
++ dev = obj->my_dev;
++
++ /* Bail out if not yaffs2 mode */
++ if (!dev->param.is_yaffs2)
++ return YAFFS_OK;
++
++ old_file_size = obj->variant.file_variant.file_size;
++
++ if (new_size <= old_file_size)
++ return YAFFS_OK;
++
++ increase = new_size - old_file_size;
++
++ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
++ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
++ small_hole = 1;
++ else
++ small_hole = 0;
++
++ if (small_hole)
++ local_buffer = yaffs_get_temp_buffer(dev);
++
++ if (local_buffer) {
++ /* fill hole with zero bytes */
++ loff_t pos = old_file_size;
++ int this_write;
++ int written;
++ memset(local_buffer, 0, dev->data_bytes_per_chunk);
++ small_increase_ok = 1;
++
++ while (increase > 0 && small_increase_ok) {
++ this_write = increase;
++ if (this_write > dev->data_bytes_per_chunk)
++ this_write = dev->data_bytes_per_chunk;
++ written =
++ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
++ 0);
++ if (written == this_write) {
++ pos += this_write;
++ increase -= this_write;
++ } else {
++ small_increase_ok = 0;
++ }
++ }
++
++ yaffs_release_temp_buffer(dev, local_buffer);
++
++ /* If out of space then reverse any chunks we've added */
++ if (!small_increase_ok)
++ yaffs_resize_file_down(obj, old_file_size);
++ }
++
++ if (!small_increase_ok &&
++ obj->parent &&
++ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
++ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
++ /* Write a hole start header with the old file size */
++ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
++ }
++
++ return result;
++}
++
++struct yaffs_block_index {
++ int seq;
++ int block;
++};
++
++static int yaffs2_ybicmp(const void *a, const void *b)
++{
++ int aseq = ((struct yaffs_block_index *)a)->seq;
++ int bseq = ((struct yaffs_block_index *)b)->seq;
++ int ablock = ((struct yaffs_block_index *)a)->block;
++ int bblock = ((struct yaffs_block_index *)b)->block;
++
++ if (aseq == bseq)
++ return ablock - bblock;
++
++ return aseq - bseq;
++}
++
++static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi,
++ int blk, int chunk_in_block,
++ int *found_chunks,
++ u8 *chunk_data,
++ struct list_head *hard_list,
++ int summary_available)
++{
++ struct yaffs_obj_hdr *oh;
++ struct yaffs_obj *in;
++ struct yaffs_obj *parent;
++ int equiv_id;
++ loff_t file_size;
++ int is_shrink;
++ int is_unlinked;
++ struct yaffs_ext_tags tags;
++ int result;
++ int alloc_failed = 0;
++ int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
++ struct yaffs_file_var *file_var;
++ struct yaffs_hardlink_var *hl_var;
++ struct yaffs_symlink_var *sl_var;
++
++ if (summary_available) {
++ result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
++ tags.seq_number = bi->seq_number;
++ }
++
++ if (!summary_available || tags.obj_id == 0) {
++ result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
++ dev->tags_used++;
++ } else {
++ dev->summary_used++;
++ }
++
++ /* Let's have a good look at this chunk... */
++
++ if (!tags.chunk_used) {
++ /* An unassigned chunk in the block.
++ * If there are used chunks after this one, then
++ * it is a chunk that was skipped due to failing
++ * the erased check. Just skip it so that it can
++ * be deleted.
++ * But, more typically, we get here when this is
++ * an unallocated chunk and this means that
++ * either the block is empty or this is the one
++ * being allocated from.
++ */
++
++ if (*found_chunks) {
++ /* This is a chunk that was skipped due
++ * to failing the erased check */
++ } else if (chunk_in_block == 0) {
++ /* We're looking at the first chunk in
++ * the block so the block is unused */
++ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
++ dev->n_erased_blocks++;
++ } else {
++ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
++ if (dev->seq_number == bi->seq_number) {
++ /* Allocating from this block*/
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Allocating from %d %d",
++ blk, chunk_in_block);
++
++ bi->block_state =
++ YAFFS_BLOCK_STATE_ALLOCATING;
++ dev->alloc_block = blk;
++ dev->alloc_page = chunk_in_block;
++ dev->alloc_block_finder = blk;
++ } else {
++ /* This is a partially written block
++ * that is not the current
++ * allocation block.
++ */
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Partially written block %d detected. gc will fix this.",
++ blk);
++ }
++ }
++ }
++
++ dev->n_free_chunks++;
++
++ } else if (tags.ecc_result ==
++ YAFFS_ECC_RESULT_UNFIXED) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ " Unfixed ECC in chunk(%d:%d), chunk ignored",
++ blk, chunk_in_block);
++ dev->n_free_chunks++;
++ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
++ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
++ tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
++ (tags.chunk_id > 0 &&
++ tags.n_bytes > dev->data_bytes_per_chunk) ||
++ tags.seq_number != bi->seq_number) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
++ blk, chunk_in_block, tags.obj_id,
++ tags.chunk_id, tags.n_bytes);
++ dev->n_free_chunks++;
++ } else if (tags.chunk_id > 0) {
++ /* chunk_id > 0 so it is a data chunk... */
++ loff_t endpos;
++ loff_t chunk_base = (tags.chunk_id - 1) *
++ dev->data_bytes_per_chunk;
++
++ *found_chunks = 1;
++
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ YAFFS_OBJECT_TYPE_FILE);
++ if (!in)
++ /* Out of memory */
++ alloc_failed = 1;
++
++ if (in &&
++ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
++ chunk_base < in->variant.file_variant.shrink_size) {
++ /* This has not been invalidated by
++ * a resize */
++ if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
++ chunk, -1))
++ alloc_failed = 1;
++
++ /* File size is calculated by looking at
++ * the data chunks if we have not
++ * seen an object header yet.
++ * Stop this practice once we find an
++ * object header.
++ */
++ endpos = chunk_base + tags.n_bytes;
++
++ if (!in->valid &&
++ in->variant.file_variant.scanned_size < endpos) {
++ in->variant.file_variant.
++ scanned_size = endpos;
++ in->variant.file_variant.
++ file_size = endpos;
++ }
++ } else if (in) {
++ /* This chunk has been invalidated by a
++ * resize or a past file deletion,
++ * so delete the chunk */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++ }
++ } else {
++ /* chunk_id == 0, so it is an ObjectHeader.
++ * Thus, we read in the object header and make
++ * the object
++ */
++ *found_chunks = 1;
++
++ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
++ bi->pages_in_use++;
++
++ oh = NULL;
++ in = NULL;
++
++ if (tags.extra_available) {
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id,
++ tags.extra_obj_type);
++ if (!in)
++ alloc_failed = 1;
++ }
++
++ if (!in ||
++ (!in->valid && dev->param.disable_lazy_load) ||
++ tags.extra_shadows ||
++ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
++
++ /* If we don't have valid info then we
++ * need to read the chunk.
++ * TODO: In future we can probably defer
++ * reading the chunk and live with
++ * invalid data until it is needed.
++ */
++
++ result = yaffs_rd_chunk_tags_nand(dev,
++ chunk,
++ chunk_data,
++ NULL);
++
++ oh = (struct yaffs_obj_hdr *)chunk_data;
++
++ if (dev->param.inband_tags) {
++ /* Fix up the header if it got
++ * corrupted by inband tags */
++ oh->shadows_obj =
++ oh->inband_shadowed_obj_id;
++ oh->is_shrink =
++ oh->inband_is_shrink;
++ }
++
++ if (!in) {
++ in = yaffs_find_or_create_by_number(dev,
++ tags.obj_id, oh->type);
++ if (!in)
++ alloc_failed = 1;
++ }
++ }
++
++ if (!in) {
++ /* TODO Hoosterman we have a problem! */
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
++ tags.obj_id, chunk);
++ return YAFFS_FAIL;
++ }
++
++ if (in->valid) {
++ /* We have already filled this one.
++ * We have a duplicate that will be
++ * discarded, but we first have to suck
++ * out resize info if it is a file.
++ */
++ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
++ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
++ (tags.extra_available &&
++ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
++ )) {
++ loff_t this_size = (oh) ?
++ yaffs_oh_to_size(oh) :
++ tags.extra_file_size;
++ u32 parent_obj_id = (oh) ?
++ oh->parent_obj_id :
++ tags.extra_parent_id;
++
++ is_shrink = (oh) ?
++ oh->is_shrink :
++ tags.extra_is_shrink;
++
++ /* If it is deleted (unlinked
++ * at start also means deleted)
++ * we treat the file size as
++ * being zeroed at this point.
++ */
++ if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
++ parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
++ this_size = 0;
++ is_shrink = 1;
++ }
++
++ if (is_shrink &&
++ in->variant.file_variant.shrink_size >
++ this_size)
++ in->variant.file_variant.shrink_size =
++ this_size;
++
++ if (is_shrink)
++ bi->has_shrink_hdr = 1;
++ }
++ /* Use existing - destroy this one. */
++ yaffs_chunk_del(dev, chunk, 1, __LINE__);
++ }
++
++ if (!in->valid && in->variant_type !=
++ (oh ? oh->type : tags.extra_obj_type)) {
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: Bad type, %d != %d, for object %d at chunk %d during scan",
++ oh ? oh->type : tags.extra_obj_type,
++ in->variant_type, tags.obj_id,
++ chunk);
++ in = yaffs_retype_obj(in, oh ? oh->type : tags.extra_obj_type);
++ }
++
++ if (!in->valid &&
++ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
++ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
++ /* We only load some info, don't fiddle
++ * with directory structure */
++ in->valid = 1;
++
++ if (oh) {
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++ in->lazy_loaded = 0;
++ } else {
++ in->lazy_loaded = 1;
++ }
++ in->hdr_chunk = chunk;
++
++ } else if (!in->valid) {
++ /* we need to load this info */
++ in->valid = 1;
++ in->hdr_chunk = chunk;
++ if (oh) {
++ in->variant_type = oh->type;
++ in->yst_mode = oh->yst_mode;
++ yaffs_load_attribs(in, oh);
++
++ if (oh->shadows_obj > 0)
++ yaffs_handle_shadowed_obj(dev,
++ oh->shadows_obj, 1);
++
++ yaffs_set_obj_name_from_oh(in, oh);
++ parent = yaffs_find_or_create_by_number(dev,
++ oh->parent_obj_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ file_size = yaffs_oh_to_size(oh);
++ is_shrink = oh->is_shrink;
++ equiv_id = oh->equiv_id;
++ } else {
++ in->variant_type = tags.extra_obj_type;
++ parent = yaffs_find_or_create_by_number(dev,
++ tags.extra_parent_id,
++ YAFFS_OBJECT_TYPE_DIRECTORY);
++ file_size = tags.extra_file_size;
++ is_shrink = tags.extra_is_shrink;
++ equiv_id = tags.extra_equiv_id;
++ in->lazy_loaded = 1;
++ }
++ in->dirty = 0;
++
++ if (!parent)
++ alloc_failed = 1;
++
++ /* directory stuff...
++ * hook up to parent
++ */
++
++ if (parent &&
++ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
++ /* Set up as a directory */
++ parent->variant_type =
++ YAFFS_OBJECT_TYPE_DIRECTORY;
++ INIT_LIST_HEAD(&parent->
++ variant.dir_variant.children);
++ } else if (!parent ||
++ parent->variant_type !=
++ YAFFS_OBJECT_TYPE_DIRECTORY) {
++ /* Hoosterman, another problem....
++ * Trying to use a non-directory as a directory
++ */
++
++ yaffs_trace(YAFFS_TRACE_ERROR,
++ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
++ );
++ parent = dev->lost_n_found;
++ }
++ yaffs_add_obj_to_dir(parent, in);
++
++ is_unlinked = (parent == dev->del_dir) ||
++ (parent == dev->unlinked_dir);
++
++ if (is_shrink)
++ /* Mark the block */
++ bi->has_shrink_hdr = 1;
++
++ /* Note re hardlinks.
++ * Since we might scan a hardlink before its equivalent
++ * object is scanned we put them all in a list.
++ * After scanning is complete, we should have all the
++ * objects, so we run through this list and fix up all
++ * the chains.
++ */
++
++ switch (in->variant_type) {
++ case YAFFS_OBJECT_TYPE_UNKNOWN:
++ /* Todo got a problem */
++ break;
++ case YAFFS_OBJECT_TYPE_FILE:
++ file_var = &in->variant.file_variant;
++ if (file_var->scanned_size < file_size) {
++ /* This covers the case where the file
++ * size is greater than the data held.
++ * This will happen if the file is
++ * resized to be larger than its
++ * current data extents.
++ */
++ file_var->file_size = file_size;
++ file_var->scanned_size = file_size;
++ }
++
++ if (file_var->shrink_size > file_size)
++ file_var->shrink_size = file_size;
++
++ break;
++ case YAFFS_OBJECT_TYPE_HARDLINK:
++ hl_var = &in->variant.hardlink_variant;
++ if (!is_unlinked) {
++ hl_var->equiv_id = equiv_id;
++ list_add(&in->hard_links, hard_list);
++ }
++ break;
++ case YAFFS_OBJECT_TYPE_DIRECTORY:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SPECIAL:
++ /* Do nothing */
++ break;
++ case YAFFS_OBJECT_TYPE_SYMLINK:
++ sl_var = &in->variant.symlink_variant;
++ if (oh) {
++ sl_var->alias =
++ yaffs_clone_str(oh->alias);
++ if (!sl_var->alias)
++ alloc_failed = 1;
++ }
++ break;
++ }
++ }
++ }
++ return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
++}
++
++int yaffs2_scan_backwards(struct yaffs_dev *dev)
++{
++ int blk;
++ int block_iter;
++ int start_iter;
++ int end_iter;
++ int n_to_scan = 0;
++ enum yaffs_block_state state;
++ int c;
++ int deleted;
++ LIST_HEAD(hard_list);
++ struct yaffs_block_info *bi;
++ u32 seq_number;
++ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
++ u8 *chunk_data;
++ int found_chunks;
++ int alloc_failed = 0;
++ struct yaffs_block_index *block_index = NULL;
++ int alt_block_index = 0;
++ int summary_available;
++
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
++ dev->internal_start_block, dev->internal_end_block);
++
++ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
++
++ block_index =
++ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
++
++ if (!block_index) {
++ block_index =
++ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
++ alt_block_index = 1;
++ }
++
++ if (!block_index) {
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "yaffs2_scan_backwards() could not allocate block index!"
++ );
++ return YAFFS_FAIL;
++ }
++
++ dev->blocks_in_checkpt = 0;
++
++ chunk_data = yaffs_get_temp_buffer(dev);
++
++ /* Scan all the blocks to determine their state */
++ bi = dev->block_info;
++ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
++ blk++) {
++ yaffs_clear_chunk_bits(dev, blk);
++ bi->pages_in_use = 0;
++ bi->soft_del_pages = 0;
++
++ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
++
++ bi->block_state = state;
++ bi->seq_number = seq_number;
++
++ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
++ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
++ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
++ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
++
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
++ "Block scanning block %d state %d seq %d",
++ blk, bi->block_state, seq_number);
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
++ dev->blocks_in_checkpt++;
++
++ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
++ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
++ "block %d is bad", blk);
++ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
++ dev->n_erased_blocks++;
++ dev->n_free_chunks += dev->param.chunks_per_block;
++ } else if (bi->block_state ==
++ YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ /* Determine the highest sequence number */
++ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
++ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
++ block_index[n_to_scan].seq = seq_number;
++ block_index[n_to_scan].block = blk;
++ n_to_scan++;
++ if (seq_number >= dev->seq_number)
++ dev->seq_number = seq_number;
++ } else {
++ /* TODO: Nasty sequence number! */
++ yaffs_trace(YAFFS_TRACE_SCAN,
++ "Block scanning block %d has bad sequence number %d",
++ blk, seq_number);
++ }
++ }
++ bi++;
++ }
++
++ yaffs_trace(YAFFS_TRACE_ALWAYS, "%d blocks to be sorted...", n_to_scan);
++
++ cond_resched();
++
++ /* Sort the blocks by sequence number */
++ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
++ yaffs2_ybicmp, NULL);
++
++ cond_resched();
++
++ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
++
++ /* Now scan the blocks looking at the data. */
++ start_iter = 0;
++ end_iter = n_to_scan - 1;
++ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
++
++ /* For each block.... backwards */
++ for (block_iter = end_iter;
++ !alloc_failed && block_iter >= start_iter;
++ block_iter--) {
++ /* Cooperative multitasking! This loop can run for so
++ long that watchdog timers expire. */
++ cond_resched();
++
++ /* get the block to scan in the correct order */
++ blk = block_index[block_iter].block;
++ bi = yaffs_get_block_info(dev, blk);
++ deleted = 0;
++
++ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
++
++ /* For each chunk in each block that needs scanning.... */
++ found_chunks = 0;
++ if (summary_available)
++ c = dev->chunks_per_summary - 1;
++ else
++ c = dev->param.chunks_per_block - 1;
++
++ for (/* c is already initialised */;
++ !alloc_failed && c >= 0 &&
++ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
++ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
++ c--) {
++ /* Scan backwards...
++ * Read the tags and decide what to do
++ */
++ if (yaffs2_scan_chunk(dev, bi, blk, c,
++ &found_chunks, chunk_data,
++ &hard_list, summary_available) ==
++ YAFFS_FAIL)
++ alloc_failed = 1;
++ }
++
++ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
++ /* If we got this far while scanning, then the block
++ * is fully allocated. */
++ bi->block_state = YAFFS_BLOCK_STATE_FULL;
++ }
++
++ /* Now let's see if it was dirty */
++ if (bi->pages_in_use == 0 &&
++ !bi->has_shrink_hdr &&
++ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
++ yaffs_block_became_dirty(dev, blk);
++ }
++ }
++
++ yaffs_skip_rest_of_block(dev);
++
++ if (alt_block_index)
++ vfree(block_index);
++ else
++ kfree(block_index);
++
++ /* Ok, we've done all the scanning.
++ * Fix up the hard link chains.
++ * We have scanned all the objects, now it's time to add these
++ * hardlinks.
++ */
++ yaffs_link_fixup(dev, &hard_list);
++
++ yaffs_release_temp_buffer(dev, chunk_data);
++
++ if (alloc_failed)
++ return YAFFS_FAIL;
++
++ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
++
++ return YAFFS_OK;
++}
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs2.h linux-3.14.4/fs/yaffs2/yaffs_yaffs2.h
+--- linux-3.14.4.orig/fs/yaffs2/yaffs_yaffs2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yaffs_yaffs2.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,39 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YAFFS_YAFFS2_H__
++#define __YAFFS_YAFFS2_H__
++
++#include "yaffs_guts.h"
++
++void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
++void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
++void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
++ struct yaffs_block_info *bi);
++void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
++ struct yaffs_block_info *bi);
++int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
++u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
++int yaffs2_checkpt_required(struct yaffs_dev *dev);
++int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
++
++void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
++int yaffs2_checkpt_save(struct yaffs_dev *dev);
++int yaffs2_checkpt_restore(struct yaffs_dev *dev);
++
++int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
++int yaffs2_scan_backwards(struct yaffs_dev *dev);
++
++#endif
+diff -Nur linux-3.14.4.orig/fs/yaffs2/yportenv.h linux-3.14.4/fs/yaffs2/yportenv.h
+--- linux-3.14.4.orig/fs/yaffs2/yportenv.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-3.14.4/fs/yaffs2/yportenv.h 2014-05-17 02:22:30.000000000 +0200
+@@ -0,0 +1,85 @@
++/*
++ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
++ *
++ * Copyright (C) 2002-2011 Aleph One Ltd.
++ * for Toby Churchill Ltd and Brightstar Engineering
++ *
++ * Created by Charles Manning <charles@aleph1.co.uk>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU Lesser General Public License version 2.1 as
++ * published by the Free Software Foundation.
++ *
++ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
++ */
++
++#ifndef __YPORTENV_H__
++#define __YPORTENV_H__
++
++/*
++ * Define the MTD version in terms of Linux Kernel versions
++ * This allows yaffs to be used independently of the kernel
++ * as well as with it.
++ */
++
++#define MTD_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
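++/* A minimal usage sketch: MTD_VERSION(3, 14, 43) packs the version into
++ * a single integer ((3 << 16) + (14 << 8) + 43), so it can be compared
++ * the same way as KERNEL_VERSION() codes below. */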
++
++#ifdef YAFFS_OUT_OF_TREE
++#include "moduleconfig.h"
++#endif
++
++#include <linux/version.h>
++#define MTD_VERSION_CODE LINUX_VERSION_CODE
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19))
++#include <linux/config.h>
++#endif
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/xattr.h>
++#include <linux/list.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/sort.h>
++#include <linux/bitops.h>
++
++/* These type wrappings are used to support Unicode names in WinCE. */
++#define YCHAR char
++#define YUCHAR unsigned char
++#define _Y(x) x
++
++#define YAFFS_LOSTNFOUND_NAME "lost+found"
++#define YAFFS_LOSTNFOUND_PREFIX "obj"
++
++
++#define YAFFS_ROOT_MODE 0755
++#define YAFFS_LOSTNFOUND_MODE 0700
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 0))
++#define Y_CURRENT_TIME CURRENT_TIME.tv_sec
++#define Y_TIME_CONVERT(x) (x).tv_sec
++#else
++#define Y_CURRENT_TIME CURRENT_TIME
++#define Y_TIME_CONVERT(x) (x)
++#endif
++
++#define compile_time_assertion(assertion) \
++ ({ int x = __builtin_choose_expr(assertion, 0, (void)0); (void) x; })
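++/* A minimal usage sketch: the macro compiles away when the condition
++ * holds and breaks the build when it does not, e.g.
++ *
++ * compile_time_assertion(sizeof(u32) == 4);
++ *
++ * because (void)0 cannot be chosen as the initialiser of an int. */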
++
++
++#define yaffs_printf(msk, fmt, ...) \
++ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__)
++
++#define yaffs_trace(msk, fmt, ...) do { \
++ if (yaffs_trace_mask & (msk)) \
++ printk(KERN_DEBUG "yaffs: " fmt "\n", ##__VA_ARGS__); \
++} while (0)
++
++
++#endif
diff --git a/target/linux/patches/3.14.43/zlib-inflate.patch b/target/linux/patches/3.14.43/zlib-inflate.patch
new file mode 100644
index 000000000..58e1f6d21
--- /dev/null
+++ b/target/linux/patches/3.14.43/zlib-inflate.patch
@@ -0,0 +1,12 @@
+diff -Nur linux-2.6.37.orig/lib/Kconfig linux-2.6.37/lib/Kconfig
+--- linux-2.6.37.orig/lib/Kconfig 2011-01-05 01:50:19.000000000 +0100
++++ linux-2.6.37/lib/Kconfig 2011-03-01 20:10:29.833370667 +0100
+@@ -95,7 +95,7 @@
+ # compression support is select'ed if needed
+ #
+ config ZLIB_INFLATE
+- tristate
++ boolean
+
+ config ZLIB_DEFLATE
+ tristate